repo (string, 1-191 chars, nullable) | file (string, 23-351 chars) | code (string, 0-5.32M chars) | file_length (int64, 0-5.32M) | avg_line_length (float64, 0-2.9k) | max_line_length (int64, 0-288k) | extension_type (string, 1 distinct value) |
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIdentityProviders.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Test;
import org.junit.Before;
import org.junit.After;
import java.util.List;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.conf.Configuration;
public class TestIdentityProviders {
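// Minimal Schedulable backed by the current user, so identity providers
// under test have something to derive an identity string from.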
public class FakeSchedulable implements Schedulable {
public FakeSchedulable() {
}
public UserGroupInformation getUserGroupInformation() {
try {
return UserGroupInformation.getCurrentUser();
} catch (IOException e) {
return null;
}
}
}
@Test
public void testPluggableIdentityProvider() {
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
"org.apache.hadoop.ipc.UserIdentityProvider");
List<IdentityProvider> providers = conf.getInstances(
CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
IdentityProvider.class);
assertEquals(1, providers.size());
IdentityProvider ip = providers.get(0);
assertNotNull(ip);
assertEquals(UserIdentityProvider.class, ip.getClass());
}
@Test
public void testUserIdentityProvider() throws IOException {
UserIdentityProvider uip = new UserIdentityProvider();
String identity = uip.makeIdentity(new FakeSchedulable());
// Get our username
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
String username = ugi.getUserName();
assertEquals(username, identity);
}
}
| 2,818 | 30.674157 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRetryCacheMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import org.apache.hadoop.ipc.metrics.RetryCacheMetrics;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.junit.Test;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.mockito.Mockito.*;
/**
* Tests for {@link RetryCacheMetrics}
*/
public class TestRetryCacheMetrics {
static final String cacheName = "NameNodeRetryCache";
@Test
public void testNames() {
RetryCache cache = mock(RetryCache.class);
when(cache.getCacheName()).thenReturn(cacheName);
RetryCacheMetrics metrics = RetryCacheMetrics.create(cache);
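// Bump each counter a different number of times (1 hit, 2 cleared,
// 3 updated) so the assertions below can tell the counters apart.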
metrics.incrCacheHit();
metrics.incrCacheCleared();
metrics.incrCacheCleared();
metrics.incrCacheUpdated();
metrics.incrCacheUpdated();
metrics.incrCacheUpdated();
checkMetrics(1, 2, 3);
}
private void checkMetrics(long hit, long cleared, long updated) {
MetricsRecordBuilder rb = getMetrics("RetryCache." + cacheName);
assertCounter("CacheHit", hit, rb);
assertCounter("CacheCleared", cleared, rb);
assertCounter("CacheUpdated", updated, rb);
}
}
| 1,991 | 32.2 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.KERBEROS;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.SIMPLE;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.TOKEN;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.security.Security;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.AuthorizeCallback;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SaslInputStream;
import org.apache.hadoop.security.SaslPlainServer;
import org.apache.hadoop.security.SaslPropertiesResolver;
import org.apache.hadoop.security.SaslRpcClient;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
import org.apache.hadoop.security.SecurityInfo;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.TestUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
import org.apache.log4j.Level;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
/** Unit tests for using Sasl over RPC. */
@RunWith(Parameterized.class)
public class TestSaslRPC {
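// Each parameterized run supplies: the QOP values to configure, the QOP
// expected to be negotiated, and an optional SaslPropertiesResolver class
// that can override the server-side QOP per connection.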
@Parameters
public static Collection<Object[]> data() {
Collection<Object[]> params = new ArrayList<Object[]>();
for (QualityOfProtection qop : QualityOfProtection.values()) {
params.add(new Object[]{ new QualityOfProtection[]{ qop }, qop, null });
}
params.add(new Object[]{ new QualityOfProtection[]{
QualityOfProtection.PRIVACY, QualityOfProtection.AUTHENTICATION },
QualityOfProtection.PRIVACY, null });
params.add(new Object[]{ new QualityOfProtection[]{
QualityOfProtection.PRIVACY, QualityOfProtection.AUTHENTICATION },
QualityOfProtection.AUTHENTICATION,
"org.apache.hadoop.ipc.TestSaslRPC$AuthSaslPropertiesResolver" });
return params;
}
QualityOfProtection[] qop;
QualityOfProtection expectedQop;
String saslPropertiesResolver;
public TestSaslRPC(QualityOfProtection[] qop,
QualityOfProtection expectedQop,
String saslPropertiesResolver) {
this.qop = qop;
this.expectedQop = expectedQop;
this.saslPropertiesResolver = saslPropertiesResolver;
}
private static final String ADDRESS = "0.0.0.0";
public static final Log LOG =
LogFactory.getLog(TestSaslRPC.class);
static final String ERROR_MESSAGE = "Token is invalid";
static final String SERVER_PRINCIPAL_KEY = "test.ipc.server.principal";
static final String SERVER_KEYTAB_KEY = "test.ipc.server.keytab";
static final String SERVER_PRINCIPAL_1 = "p1/foo@BAR";
static final String SERVER_PRINCIPAL_2 = "p2/foo@BAR";
private static Configuration conf;
// If this is set to true AND the auth-method is not simple, secretManager
// will be enabled.
static Boolean enableSecretManager = null;
// If this is set to true, secretManager will be forcefully enabled
// irrespective of auth-method.
static Boolean forceSecretManager = null;
static Boolean clientFallBackToSimpleAllowed = true;
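// How the client presents a token: no token, a token signed by the
// server's secret manager, a token with a bad password, or an unrelated
// token for some other service.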
enum UseToken {
NONE,
VALID,
INVALID,
OTHER
}
@BeforeClass
public static void setupKerb() {
System.setProperty("java.security.krb5.kdc", "");
System.setProperty("java.security.krb5.realm", "NONE");
Security.addProvider(new SaslPlainServer.SecurityProvider());
}
@Before
public void setup() {
LOG.info("---------------------------------");
LOG.info("Testing QOP:"+ getQOPNames(qop));
LOG.info("---------------------------------");
conf = new Configuration();
// the specific tests for kerberos will enable kerberos. forcing it
// for all tests will cause tests to fail if the user has a TGT
conf.set(HADOOP_SECURITY_AUTHENTICATION, SIMPLE.toString());
conf.set(HADOOP_RPC_PROTECTION, getQOPNames(qop));
if (saslPropertiesResolver != null){
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
saslPropertiesResolver);
}
UserGroupInformation.setConfiguration(conf);
enableSecretManager = null;
forceSecretManager = null;
clientFallBackToSimpleAllowed = true;
}
static String getQOPNames(QualityOfProtection[] qops) {
StringBuilder sb = new StringBuilder();
int i = 0;
for (QualityOfProtection qop : qops) {
sb.append(org.apache.hadoop.util.StringUtils.toLowerCase(qop.name()));
if (++i < qops.length) {
sb.append(",");
}
}
return sb.toString();
}
static {
((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslRpcClient.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SecurityUtil.LOG).getLogger().setLevel(Level.ALL);
}
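// A writable token identifier carrying a user name and an optional real
// user, so proxy-user authentication can be exercised as well.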
public static class TestTokenIdentifier extends TokenIdentifier {
private Text tokenid;
private Text realUser;
final static Text KIND_NAME = new Text("test.token");
public TestTokenIdentifier() {
this(new Text(), new Text());
}
public TestTokenIdentifier(Text tokenid) {
this(tokenid, new Text());
}
public TestTokenIdentifier(Text tokenid, Text realUser) {
this.tokenid = tokenid == null ? new Text() : tokenid;
this.realUser = realUser == null ? new Text() : realUser;
}
@Override
public Text getKind() {
return KIND_NAME;
}
@Override
public UserGroupInformation getUser() {
if (realUser.toString().isEmpty()) {
return UserGroupInformation.createRemoteUser(tokenid.toString());
} else {
UserGroupInformation realUgi = UserGroupInformation
.createRemoteUser(realUser.toString());
return UserGroupInformation
.createProxyUser(tokenid.toString(), realUgi);
}
}
@Override
public void readFields(DataInput in) throws IOException {
tokenid.readFields(in);
realUser.readFields(in);
}
@Override
public void write(DataOutput out) throws IOException {
tokenid.write(out);
realUser.write(out);
}
}
public static class TestTokenSecretManager extends
SecretManager<TestTokenIdentifier> {
@Override
public byte[] createPassword(TestTokenIdentifier id) {
return id.getBytes();
}
@Override
public byte[] retrievePassword(TestTokenIdentifier id)
throws InvalidToken {
return id.getBytes();
}
@Override
public TestTokenIdentifier createIdentifier() {
return new TestTokenIdentifier();
}
}
public static class BadTokenSecretManager extends TestTokenSecretManager {
@Override
public byte[] retrievePassword(TestTokenIdentifier id)
throws InvalidToken {
throw new InvalidToken(ERROR_MESSAGE);
}
}
public static class TestTokenSelector implements
TokenSelector<TestTokenIdentifier> {
@SuppressWarnings("unchecked")
@Override
public Token<TestTokenIdentifier> selectToken(Text service,
Collection<Token<? extends TokenIdentifier>> tokens) {
if (service == null) {
return null;
}
for (Token<? extends TokenIdentifier> token : tokens) {
if (TestTokenIdentifier.KIND_NAME.equals(token.getKind())
&& service.equals(token.getService())) {
return (Token<TestTokenIdentifier>) token;
}
}
return null;
}
}
@KerberosInfo(
serverPrincipal = SERVER_PRINCIPAL_KEY)
@TokenInfo(TestTokenSelector.class)
public interface TestSaslProtocol extends TestRPC.TestProtocol {
public AuthMethod getAuthMethod() throws IOException;
public String getAuthUser() throws IOException;
}
public static class TestSaslImpl extends TestRPC.TestImpl implements
TestSaslProtocol {
@Override
public AuthMethod getAuthMethod() throws IOException {
return UserGroupInformation.getCurrentUser()
.getAuthenticationMethod().getAuthMethod();
}
@Override
public String getAuthUser() throws IOException {
return UserGroupInformation.getCurrentUser().getUserName();
}
}
public static class CustomSecurityInfo extends SecurityInfo {
@Override
public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
return new KerberosInfo() {
@Override
public Class<? extends Annotation> annotationType() {
return null;
}
@Override
public String serverPrincipal() {
return SERVER_PRINCIPAL_KEY;
}
@Override
public String clientPrincipal() {
return null;
}
};
}
@Override
public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
return new TokenInfo() {
@Override
public Class<? extends TokenSelector<? extends
TokenIdentifier>> value() {
return TestTokenSelector.class;
}
@Override
public Class<? extends Annotation> annotationType() {
return null;
}
};
}
}
@Test
public void testDigestRpc() throws Exception {
TestTokenSecretManager sm = new TestTokenSecretManager();
final Server server = new RPC.Builder(conf)
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(sm).build();
doDigestRpc(server, sm);
}
@Test
public void testDigestRpcWithoutAnnotation() throws Exception {
TestTokenSecretManager sm = new TestTokenSecretManager();
try {
SecurityUtil.setSecurityInfoProviders(new CustomSecurityInfo());
final Server server = new RPC.Builder(conf)
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5)
.setVerbose(true).setSecretManager(sm).build();
doDigestRpc(server, sm);
} finally {
SecurityUtil.setSecurityInfoProviders(new SecurityInfo[0]);
}
}
@Test
public void testErrorMessage() throws Exception {
BadTokenSecretManager sm = new BadTokenSecretManager();
final Server server = new RPC.Builder(conf)
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(sm).build();
boolean succeeded = false;
try {
doDigestRpc(server, sm);
} catch (RemoteException e) {
LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
assertEquals(ERROR_MESSAGE, e.getLocalizedMessage());
assertTrue(e.unwrapRemoteException() instanceof InvalidToken);
succeeded = true;
}
assertTrue(succeeded);
}
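// Runs a full token-authenticated round trip: mint a token for the current
// user, attach it to the UGI, then verify the connection negotiated TOKEN
// auth with the expected QOP.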
private void doDigestRpc(Server server, TestTokenSecretManager sm
) throws Exception {
server.start();
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
.getUserName()));
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
sm);
SecurityUtil.setTokenService(token, addr);
current.addToken(token);
TestSaslProtocol proxy = null;
try {
proxy = RPC.getProxy(TestSaslProtocol.class,
TestSaslProtocol.versionID, addr, conf);
AuthMethod authMethod = proxy.getAuthMethod();
assertEquals(TOKEN, authMethod);
// verify the negotiated QOP matches the expected QOP
assertEquals(expectedQop.saslQop,
RPC.getConnectionIdForProxy(proxy).getSaslQop());
proxy.ping();
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@Test
public void testPingInterval() throws Exception {
Configuration newConf = new Configuration(conf);
newConf.set(SERVER_PRINCIPAL_KEY, SERVER_PRINCIPAL_1);
newConf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);
// set doPing to true
newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
ConnectionId remoteId = ConnectionId.getConnectionId(
new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
remoteId.getPingInterval());
// set doPing to false
newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, false);
remoteId = ConnectionId.getConnectionId(
new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
assertEquals(0, remoteId.getPingInterval());
}
@Test
public void testPerConnectionConf() throws Exception {
TestTokenSecretManager sm = new TestTokenSecretManager();
final Server server = new RPC.Builder(conf)
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(sm).build();
server.start();
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
.getUserName()));
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
sm);
SecurityUtil.setTokenService(token, addr);
current.addToken(token);
Configuration newConf = new Configuration(conf);
newConf.set(CommonConfigurationKeysPublic.
HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");
Client client = null;
TestSaslProtocol proxy1 = null;
TestSaslProtocol proxy2 = null;
TestSaslProtocol proxy3 = null;
int[] timeouts = {111222, 3333333};
try {
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[0]);
proxy1 = RPC.getProxy(TestSaslProtocol.class,
TestSaslProtocol.versionID, addr, newConf);
proxy1.getAuthMethod();
client = WritableRpcEngine.getClient(newConf);
Set<ConnectionId> conns = client.getConnectionIds();
assertEquals("number of connections in cache is wrong", 1, conns.size());
// same conf, connection should be re-used
proxy2 = RPC.getProxy(TestSaslProtocol.class,
TestSaslProtocol.versionID, addr, newConf);
proxy2.getAuthMethod();
assertEquals("number of connections in cache is wrong", 1, conns.size());
// different conf, new connection should be set up
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[1]);
proxy3 = RPC.getProxy(TestSaslProtocol.class,
TestSaslProtocol.versionID, addr, newConf);
proxy3.getAuthMethod();
assertEquals("number of connections in cache is wrong", 2, conns.size());
// now verify the proxies have the correct connection ids and timeouts
ConnectionId[] connsArray = {
RPC.getConnectionIdForProxy(proxy1),
RPC.getConnectionIdForProxy(proxy2),
RPC.getConnectionIdForProxy(proxy3)
};
assertEquals(connsArray[0], connsArray[1]);
assertEquals(timeouts[0], connsArray[0].getMaxIdleTime());
assertFalse(connsArray[0].equals(connsArray[2]));
assertEquals(timeouts[1], connsArray[2].getMaxIdleTime());
} finally {
server.stop();
// this is dirty, but clear out connection cache for next run
if (client != null) {
client.getConnectionIds().clear();
}
if (proxy1 != null) RPC.stopProxy(proxy1);
if (proxy2 != null) RPC.stopProxy(proxy2);
if (proxy3 != null) RPC.stopProxy(proxy3);
}
}
static void testKerberosRpc(String principal, String keytab) throws Exception {
final Configuration newConf = new Configuration(conf);
newConf.set(SERVER_PRINCIPAL_KEY, principal);
newConf.set(SERVER_KEYTAB_KEY, keytab);
SecurityUtil.login(newConf, SERVER_KEYTAB_KEY, SERVER_PRINCIPAL_KEY);
TestUserGroupInformation.verifyLoginMetrics(1, 0);
UserGroupInformation current = UserGroupInformation.getCurrentUser();
System.out.println("UGI: " + current);
Server server = new RPC.Builder(newConf)
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.build();
TestSaslProtocol proxy = null;
server.start();
InetSocketAddress addr = NetUtils.getConnectAddress(server);
try {
proxy = RPC.getProxy(TestSaslProtocol.class,
TestSaslProtocol.versionID, addr, newConf);
proxy.ping();
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
System.out.println("Test is successful.");
}
@Test
public void testSaslPlainServer() throws IOException {
runNegotiation(
new TestPlainCallbacks.Client("user", "pass"),
new TestPlainCallbacks.Server("user", "pass"));
}
@Test
public void testSaslPlainServerBadPassword() {
SaslException e = null;
try {
runNegotiation(
new TestPlainCallbacks.Client("user", "pass1"),
new TestPlainCallbacks.Server("user", "pass2"));
} catch (SaslException se) {
e = se;
}
assertNotNull(e);
assertEquals("PLAIN auth failed: wrong password", e.getMessage());
}
private void runNegotiation(CallbackHandler clientCbh,
CallbackHandler serverCbh)
throws SaslException {
String mechanism = AuthMethod.PLAIN.getMechanismName();
SaslClient saslClient = Sasl.createSaslClient(
new String[]{ mechanism }, null, null, null, null, clientCbh);
assertNotNull(saslClient);
SaslServer saslServer = Sasl.createSaslServer(
mechanism, null, "localhost", null, serverCbh);
assertNotNull("failed to find PLAIN server", saslServer);
byte[] response = saslClient.evaluateChallenge(new byte[0]);
assertNotNull(response);
assertTrue(saslClient.isComplete());
response = saslServer.evaluateResponse(response);
assertNull(response);
assertTrue(saslServer.isComplete());
assertNotNull(saslServer.getAuthorizationID());
}
static class TestPlainCallbacks {
public static class Client implements CallbackHandler {
String user = null;
String password = null;
Client(String user, String password) {
this.user = user;
this.password = password;
}
@Override
public void handle(Callback[] callbacks)
throws UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof NameCallback) {
((NameCallback) callback).setName(user);
} else if (callback instanceof PasswordCallback) {
((PasswordCallback) callback).setPassword(password.toCharArray());
} else {
throw new UnsupportedCallbackException(callback,
"Unrecognized SASL PLAIN Callback");
}
}
}
}
public static class Server implements CallbackHandler {
String user = null;
String password = null;
Server(String user, String password) {
this.user = user;
this.password = password;
}
@Override
public void handle(Callback[] callbacks)
throws UnsupportedCallbackException, SaslException {
NameCallback nc = null;
PasswordCallback pc = null;
AuthorizeCallback ac = null;
for (Callback callback : callbacks) {
if (callback instanceof NameCallback) {
nc = (NameCallback)callback;
assertEquals(user, nc.getName());
} else if (callback instanceof PasswordCallback) {
pc = (PasswordCallback)callback;
if (!password.equals(new String(pc.getPassword()))) {
throw new IllegalArgumentException("wrong password");
}
} else if (callback instanceof AuthorizeCallback) {
ac = (AuthorizeCallback)callback;
assertEquals(user, ac.getAuthorizationID());
assertEquals(user, ac.getAuthenticationID());
ac.setAuthorized(true);
ac.setAuthorizedID(ac.getAuthenticationID());
} else {
throw new UnsupportedCallbackException(callback,
"Unsupported SASL PLAIN Callback");
}
}
assertNotNull(nc);
assertNotNull(pc);
assertNotNull(ac);
}
}
}
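// Regex patterns matched against the stringified exception returned by
// the getAuthMethod() helpers when a connection fails; used by
// assertAuthEquals(Pattern, String).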
private static Pattern BadToken =
Pattern.compile(".*DIGEST-MD5: digest response format violation.*");
private static Pattern KrbFailed =
Pattern.compile(".*Failed on local exception:.* " +
"Failed to specify server's Kerberos principal name.*");
private static Pattern Denied(AuthMethod method) {
return Pattern.compile(".*RemoteException.*AccessControlException.*: "
+ method + " authentication is not enabled.*");
}
private static Pattern No(AuthMethod ... method) {
String methods = StringUtils.join(method, ",\\s*");
return Pattern.compile(".*Failed on local exception:.* " +
"Client cannot authenticate via:\\[" + methods + "\\].*");
}
private static Pattern NoTokenAuth =
Pattern.compile(".*IllegalArgumentException: " +
"TOKEN authentication requires a secret manager");
private static Pattern NoFallback =
Pattern.compile(".*Failed on local exception:.* " +
"Server asks us to fall back to SIMPLE auth, " +
"but this client is configured to only allow secure connections.*");
/*
* simple server
*/
@Test
public void testSimpleServer() throws Exception {
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE));
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, UseToken.OTHER));
// SASL methods are normally reverted to SIMPLE
assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE));
assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.OTHER));
}
@Test
public void testNoClientFallbackToSimple()
throws Exception {
clientFallBackToSimpleAllowed = false;
// tokens are irrelevant w/o secret manager enabled
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE));
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, UseToken.OTHER));
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, UseToken.VALID));
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, UseToken.INVALID));
// A secure client must not fallback
assertAuthEquals(NoFallback, getAuthMethod(KERBEROS, SIMPLE));
assertAuthEquals(NoFallback, getAuthMethod(KERBEROS, SIMPLE, UseToken.OTHER));
assertAuthEquals(NoFallback, getAuthMethod(KERBEROS, SIMPLE, UseToken.VALID));
assertAuthEquals(NoFallback, getAuthMethod(KERBEROS, SIMPLE, UseToken.INVALID));
// Now set server to simple and also force the secret-manager. Now server
// should have both simple and token enabled.
forceSecretManager = true;
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE));
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, UseToken.OTHER));
assertAuthEquals(TOKEN, getAuthMethod(SIMPLE, SIMPLE, UseToken.VALID));
assertAuthEquals(BadToken, getAuthMethod(SIMPLE, SIMPLE, UseToken.INVALID));
// A secure client must not fallback
assertAuthEquals(NoFallback, getAuthMethod(KERBEROS, SIMPLE));
assertAuthEquals(NoFallback, getAuthMethod(KERBEROS, SIMPLE, UseToken.OTHER));
assertAuthEquals(TOKEN, getAuthMethod(KERBEROS, SIMPLE, UseToken.VALID));
assertAuthEquals(BadToken, getAuthMethod(KERBEROS, SIMPLE, UseToken.INVALID));
// doesn't try SASL
assertAuthEquals(Denied(SIMPLE), getAuthMethod(SIMPLE, TOKEN));
// does try SASL
assertAuthEquals(No(TOKEN), getAuthMethod(SIMPLE, TOKEN, UseToken.OTHER));
assertAuthEquals(TOKEN, getAuthMethod(SIMPLE, TOKEN, UseToken.VALID));
assertAuthEquals(BadToken, getAuthMethod(SIMPLE, TOKEN, UseToken.INVALID));
assertAuthEquals(No(TOKEN), getAuthMethod(KERBEROS, TOKEN));
assertAuthEquals(No(TOKEN), getAuthMethod(KERBEROS, TOKEN, UseToken.OTHER));
assertAuthEquals(TOKEN, getAuthMethod(KERBEROS, TOKEN, UseToken.VALID));
assertAuthEquals(BadToken, getAuthMethod(KERBEROS, TOKEN, UseToken.INVALID));
}
@Test
public void testSimpleServerWithTokens() throws Exception {
// Client not using tokens
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE));
// SASL methods are reverted to SIMPLE
assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE));
// Use tokens. But tokens are ignored because client is reverted to simple
// due to server not using tokens
assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.VALID));
assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.OTHER));
// server isn't really advertising tokens
enableSecretManager = true;
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, UseToken.VALID));
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, UseToken.OTHER));
assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.VALID));
assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.OTHER));
// now the simple server takes tokens
forceSecretManager = true;
assertAuthEquals(TOKEN, getAuthMethod(SIMPLE, SIMPLE, UseToken.VALID));
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, UseToken.OTHER));
assertAuthEquals(TOKEN, getAuthMethod(KERBEROS, SIMPLE, UseToken.VALID));
assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.OTHER));
}
@Test
public void testSimpleServerWithInvalidTokens() throws Exception {
// Tokens are ignored because client is reverted to simple
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, UseToken.INVALID));
assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.INVALID));
enableSecretManager = true;
assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, UseToken.INVALID));
assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.INVALID));
forceSecretManager = true;
assertAuthEquals(BadToken, getAuthMethod(SIMPLE, SIMPLE, UseToken.INVALID));
assertAuthEquals(BadToken, getAuthMethod(KERBEROS, SIMPLE, UseToken.INVALID));
}
/*
* token server
*/
@Test
public void testTokenOnlyServer() throws Exception {
// simple client w/o tokens won't try SASL, so server denies
assertAuthEquals(Denied(SIMPLE), getAuthMethod(SIMPLE, TOKEN));
assertAuthEquals(No(TOKEN), getAuthMethod(SIMPLE, TOKEN, UseToken.OTHER));
assertAuthEquals(No(TOKEN), getAuthMethod(KERBEROS, TOKEN));
assertAuthEquals(No(TOKEN), getAuthMethod(KERBEROS, TOKEN, UseToken.OTHER));
}
@Test
public void testTokenOnlyServerWithTokens() throws Exception {
assertAuthEquals(TOKEN, getAuthMethod(SIMPLE, TOKEN, UseToken.VALID));
assertAuthEquals(TOKEN, getAuthMethod(KERBEROS, TOKEN, UseToken.VALID));
enableSecretManager = false;
assertAuthEquals(NoTokenAuth, getAuthMethod(SIMPLE, TOKEN, UseToken.VALID));
assertAuthEquals(NoTokenAuth, getAuthMethod(KERBEROS, TOKEN, UseToken.VALID));
}
@Test
public void testTokenOnlyServerWithInvalidTokens() throws Exception {
assertAuthEquals(BadToken, getAuthMethod(SIMPLE, TOKEN, UseToken.INVALID));
assertAuthEquals(BadToken, getAuthMethod(KERBEROS, TOKEN, UseToken.INVALID));
enableSecretManager = false;
assertAuthEquals(NoTokenAuth, getAuthMethod(SIMPLE, TOKEN, UseToken.INVALID));
assertAuthEquals(NoTokenAuth, getAuthMethod(KERBEROS, TOKEN, UseToken.INVALID));
}
/*
* kerberos server
*/
@Test
public void testKerberosServer() throws Exception {
// doesn't try SASL
assertAuthEquals(Denied(SIMPLE), getAuthMethod(SIMPLE, KERBEROS));
// does try SASL
assertAuthEquals(No(TOKEN,KERBEROS), getAuthMethod(SIMPLE, KERBEROS, UseToken.OTHER));
// no tgt
assertAuthEquals(KrbFailed, getAuthMethod(KERBEROS, KERBEROS));
assertAuthEquals(KrbFailed, getAuthMethod(KERBEROS, KERBEROS, UseToken.OTHER));
}
@Test
public void testKerberosServerWithTokens() throws Exception {
// can use tokens regardless of auth
assertAuthEquals(TOKEN, getAuthMethod(SIMPLE, KERBEROS, UseToken.VALID));
assertAuthEquals(TOKEN, getAuthMethod(KERBEROS, KERBEROS, UseToken.VALID));
enableSecretManager = false;
// shouldn't even try token because server didn't tell us to
assertAuthEquals(No(KERBEROS), getAuthMethod(SIMPLE, KERBEROS, UseToken.VALID));
assertAuthEquals(KrbFailed, getAuthMethod(KERBEROS, KERBEROS, UseToken.VALID));
}
@Test
public void testKerberosServerWithInvalidTokens() throws Exception {
assertAuthEquals(BadToken, getAuthMethod(SIMPLE, KERBEROS, UseToken.INVALID));
assertAuthEquals(BadToken, getAuthMethod(KERBEROS, KERBEROS, UseToken.INVALID));
enableSecretManager = false;
assertAuthEquals(No(KERBEROS), getAuthMethod(SIMPLE, KERBEROS, UseToken.INVALID));
assertAuthEquals(KrbFailed, getAuthMethod(KERBEROS, KERBEROS, UseToken.INVALID));
}
// test helpers
private String getAuthMethod(
final AuthMethod clientAuth,
final AuthMethod serverAuth) throws Exception {
try {
return internalGetAuthMethod(clientAuth, serverAuth, UseToken.NONE);
} catch (Exception e) {
LOG.warn("Auth method failure", e);
return e.toString();
}
}
private String getAuthMethod(
final AuthMethod clientAuth,
final AuthMethod serverAuth,
final UseToken tokenType) throws Exception {
try {
return internalGetAuthMethod(clientAuth, serverAuth, tokenType);
} catch (Exception e) {
LOG.warn("Auth method failure", e);
return e.toString();
}
}
private String internalGetAuthMethod(
final AuthMethod clientAuth,
final AuthMethod serverAuth,
final UseToken tokenType) throws Exception {
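// Starts a server as a user with the given server-side auth method, then
// connects as a client user (optionally presenting a token) and returns
// either the negotiated auth method or the stringified failure.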
final Configuration serverConf = new Configuration(conf);
serverConf.set(HADOOP_SECURITY_AUTHENTICATION, serverAuth.toString());
UserGroupInformation.setConfiguration(serverConf);
final UserGroupInformation serverUgi = (serverAuth == KERBEROS)
? UserGroupInformation.createRemoteUser("server/localhost@NONE")
: UserGroupInformation.createRemoteUser("server");
serverUgi.setAuthenticationMethod(serverAuth);
final TestTokenSecretManager sm = new TestTokenSecretManager();
boolean useSecretManager = (serverAuth != SIMPLE);
if (enableSecretManager != null) {
useSecretManager &= enableSecretManager.booleanValue();
}
if (forceSecretManager != null) {
useSecretManager |= forceSecretManager.booleanValue();
}
final SecretManager<?> serverSm = useSecretManager ? sm : null;
Server server = serverUgi.doAs(new PrivilegedExceptionAction<Server>() {
@Override
public Server run() throws IOException {
Server server = new RPC.Builder(serverConf)
.setProtocol(TestSaslProtocol.class)
.setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true)
.setSecretManager(serverSm)
.build();
server.start();
return server;
}
});
final Configuration clientConf = new Configuration(conf);
clientConf.set(HADOOP_SECURITY_AUTHENTICATION, clientAuth.toString());
clientConf.setBoolean(
CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
clientFallBackToSimpleAllowed);
UserGroupInformation.setConfiguration(clientConf);
final UserGroupInformation clientUgi =
UserGroupInformation.createRemoteUser("client");
clientUgi.setAuthenticationMethod(clientAuth);
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
if (tokenType != UseToken.NONE) {
TestTokenIdentifier tokenId = new TestTokenIdentifier(
new Text(clientUgi.getUserName()));
Token<TestTokenIdentifier> token = null;
switch (tokenType) {
case VALID:
token = new Token<TestTokenIdentifier>(tokenId, sm);
SecurityUtil.setTokenService(token, addr);
break;
case INVALID:
token = new Token<TestTokenIdentifier>(
tokenId.getBytes(), "bad-password!".getBytes(),
tokenId.getKind(), null);
SecurityUtil.setTokenService(token, addr);
break;
case OTHER:
token = new Token<TestTokenIdentifier>();
break;
case NONE: // won't get here
}
clientUgi.addToken(token);
}
try {
LOG.info("trying ugi:"+clientUgi+" tokens:"+clientUgi.getTokens());
return clientUgi.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws IOException {
TestSaslProtocol proxy = null;
try {
proxy = RPC.getProxy(TestSaslProtocol.class,
TestSaslProtocol.versionID, addr, clientConf);
proxy.ping();
// make sure the other side thinks we are who we said we are!!!
assertEquals(clientUgi.getUserName(), proxy.getAuthUser());
AuthMethod authMethod = proxy.getAuthMethod();
// verify sasl completed with correct QOP
assertEquals((authMethod != SIMPLE) ? expectedQop.saslQop : null,
RPC.getConnectionIdForProxy(proxy).getSaslQop());
return authMethod.toString();
} finally {
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
});
} finally {
server.stop();
}
}
private static void assertAuthEquals(AuthMethod expect,
String actual) {
assertEquals(expect.toString(), actual);
}
private static void assertAuthEquals(Pattern expect,
String actual) {
// this allows us to see the regexp and the value it didn't match
if (!expect.matcher(actual).matches()) {
assertEquals(expect, actual); // it failed
} else {
assertTrue(true); // it matched
}
}
/*
* Class used to test overriding QOP values using SaslPropertiesResolver
*/
static class AuthSaslPropertiesResolver extends SaslPropertiesResolver {
@Override
public Map<String, String> getServerProperties(InetAddress address) {
Map<String, String> newProperties = new HashMap<String, String>(getDefaultProperties());
newProperties.put(Sasl.QOP, QualityOfProtection.AUTHENTICATION.getSaslQop());
return newProperties;
}
}
}
public static void main(String[] args) throws Exception {
System.out.println("Testing Kerberos authentication over RPC");
if (args.length != 2) {
System.err
.println("Usage: java <options> org.apache.hadoop.ipc.TestSaslRPC "
+ " <serverPrincipal> <keytabFile>");
System.exit(-1);
}
String principal = args[0];
String keytab = args[1];
testKerberosRpc(principal, keytab);
}
}
| 38,841 | 37.457426 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import junit.framework.TestCase;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.BlockingQueue;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.conf.Configuration;
import org.mockito.Matchers;
import static org.apache.hadoop.ipc.FairCallQueue.IPC_CALLQUEUE_PRIORITY_LEVELS_KEY;
public class TestFairCallQueue extends TestCase {
private FairCallQueue<Schedulable> fcq;
private Schedulable mockCall(String id) {
Schedulable mockCall = mock(Schedulable.class);
UserGroupInformation ugi = mock(UserGroupInformation.class);
when(ugi.getUserName()).thenReturn(id);
when(mockCall.getUserGroupInformation()).thenReturn(ugi);
return mockCall;
}
// A scheduler which always schedules into priority zero
private RpcScheduler alwaysZeroScheduler;
{
RpcScheduler sched = mock(RpcScheduler.class);
when(sched.getPriorityLevel(Matchers.<Schedulable>any())).thenReturn(0); // always queue 0
alwaysZeroScheduler = sched;
}
public void setUp() {
Configuration conf = new Configuration();
conf.setInt("ns." + IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
fcq = new FairCallQueue<Schedulable>(5, "ns", conf);
}
//
// Ensure that FairCallQueue properly implements BlockingQueue
//
public void testPollReturnsNullWhenEmpty() {
assertNull(fcq.poll());
}
public void testPollReturnsTopCallWhenNotEmpty() {
Schedulable call = mockCall("c");
assertTrue(fcq.offer(call));
assertEquals(call, fcq.poll());
// Poll took it out so the fcq is empty
assertEquals(0, fcq.size());
}
public void testOfferSucceeds() {
fcq.setScheduler(alwaysZeroScheduler);
for (int i = 0; i < 5; i++) {
// Queue 0 holds 5 calls, so all of these fit
assertTrue(fcq.offer(mockCall("c")));
}
assertEquals(5, fcq.size());
}
public void testOfferFailsWhenFull() {
fcq.setScheduler(alwaysZeroScheduler);
for (int i = 0; i < 5; i++) { assertTrue(fcq.offer(mockCall("c"))); }
assertFalse(fcq.offer(mockCall("c"))); // It's full
assertEquals(5, fcq.size());
}
public void testOfferSucceedsWhenScheduledLowPriority() {
// Scheduler will schedule into queue 0 x 5, then queue 1
RpcScheduler sched = mock(RpcScheduler.class);
when(sched.getPriorityLevel(Matchers.<Schedulable>any())).thenReturn(0, 0, 0, 0, 0, 1, 0);
fcq.setScheduler(sched);
for (int i = 0; i < 5; i++) { assertTrue(fcq.offer(mockCall("c"))); }
assertTrue(fcq.offer(mockCall("c")));
assertEquals(6, fcq.size());
}
public void testPeekNullWhenEmpty() {
assertNull(fcq.peek());
}
public void testPeekNonDestructive() {
Schedulable call = mockCall("c");
assertTrue(fcq.offer(call));
assertEquals(call, fcq.peek());
assertEquals(call, fcq.peek()); // Non-destructive
assertEquals(1, fcq.size());
}
public void testPeekPointsAtHead() {
Schedulable call = mockCall("c");
Schedulable next = mockCall("b");
fcq.offer(call);
fcq.offer(next);
assertEquals(call, fcq.peek()); // Peek points at the head
}
public void testPollTimeout() throws InterruptedException {
fcq.setScheduler(alwaysZeroScheduler);
assertNull(fcq.poll(10, TimeUnit.MILLISECONDS));
}
public void testPollSuccess() throws InterruptedException {
fcq.setScheduler(alwaysZeroScheduler);
Schedulable call = mockCall("c");
assertTrue(fcq.offer(call));
assertEquals(call, fcq.poll(10, TimeUnit.MILLISECONDS));
assertEquals(0, fcq.size());
}
public void testOfferTimeout() throws InterruptedException {
fcq.setScheduler(alwaysZeroScheduler);
for (int i = 0; i < 5; i++) {
assertTrue(fcq.offer(mockCall("c"), 10, TimeUnit.MILLISECONDS));
}
assertFalse(fcq.offer(mockCall("e"), 10, TimeUnit.MILLISECONDS)); // It's full
assertEquals(5, fcq.size());
}
public void testDrainTo() {
Configuration conf = new Configuration();
conf.setInt("ns." + IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
FairCallQueue<Schedulable> fcq2 = new FairCallQueue<Schedulable>(10, "ns", conf);
fcq.setScheduler(alwaysZeroScheduler);
fcq2.setScheduler(alwaysZeroScheduler);
// Start with 3 in fcq, to be drained
for (int i = 0; i < 3; i++) {
fcq.offer(mockCall("c"));
}
fcq.drainTo(fcq2);
assertEquals(0, fcq.size());
assertEquals(3, fcq2.size());
}
public void testDrainToWithLimit() {
Configuration conf = new Configuration();
conf.setInt("ns." + IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);
FairCallQueue<Schedulable> fcq2 = new FairCallQueue<Schedulable>(10, "ns", conf);
fcq.setScheduler(alwaysZeroScheduler);
fcq2.setScheduler(alwaysZeroScheduler);
// Start with 3 in fcq, to be drained
for (int i = 0; i < 3; i++) {
fcq.offer(mockCall("c"));
}
fcq.drainTo(fcq2, 2);
assertEquals(1, fcq.size());
assertEquals(2, fcq2.size());
}
public void testInitialRemainingCapacity() {
assertEquals(10, fcq.remainingCapacity());
}
public void testFirstQueueFullRemainingCapacity() {
fcq.setScheduler(alwaysZeroScheduler);
while (fcq.offer(mockCall("c"))) ; // Queue 0 will fill up first, then queue 1
assertEquals(5, fcq.remainingCapacity());
}
public void testAllQueuesFullRemainingCapacity() {
RpcScheduler sched = mock(RpcScheduler.class);
when(sched.getPriorityLevel(Matchers.<Schedulable>any())).thenReturn(0, 0, 0, 0, 0, 1, 1, 1, 1, 1);
fcq.setScheduler(sched);
while (fcq.offer(mockCall("c"))) ;
assertEquals(0, fcq.remainingCapacity());
assertEquals(10, fcq.size());
}
public void testQueuesPartialFilledRemainingCapacity() {
RpcScheduler sched = mock(RpcScheduler.class);
when(sched.getPriorityLevel(Matchers.<Schedulable>any())).thenReturn(0, 1, 0, 1, 0);
fcq.setScheduler(sched);
for (int i = 0; i < 5; i++) { fcq.offer(mockCall("c")); }
assertEquals(5, fcq.remainingCapacity());
assertEquals(5, fcq.size());
}
/**
* Putter produces FakeCalls
*/
public class Putter implements Runnable {
private final BlockingQueue<Schedulable> cq;
public final String tag;
public volatile int callsAdded = 0; // How many calls we added, accurate unless interrupted
private final int maxCalls;
private final CountDownLatch latch;
public Putter(BlockingQueue<Schedulable> aCq, int maxCalls, String tag,
CountDownLatch latch) {
this.maxCalls = maxCalls;
this.cq = aCq;
this.tag = tag;
this.latch = latch;
}
private String getTag() {
if (this.tag != null) return this.tag;
return "";
}
@Override
public void run() {
try {
// Fill up to max (which is infinite if maxCalls < 0)
while (callsAdded < maxCalls || maxCalls < 0) {
cq.put(mockCall(getTag()));
callsAdded++;
latch.countDown();
}
} catch (InterruptedException e) {
return;
}
}
}
/**
* Taker consumes FakeCalls
*/
public class Taker implements Runnable {
private final BlockingQueue<Schedulable> cq;
public final String tag; // if non-null, calls whose identity matches this tag are put
// back; all other calls are taken
public volatile int callsTaken = 0; // total calls taken, accurate if we aren't interrupted
public volatile Schedulable lastResult = null; // the last thing we took
private final int maxCalls; // maximum calls to take
private final CountDownLatch latch;
private IdentityProvider uip;
public Taker(BlockingQueue<Schedulable> aCq, int maxCalls, String tag,
CountDownLatch latch) {
this.maxCalls = maxCalls;
this.cq = aCq;
this.tag = tag;
this.uip = new UserIdentityProvider();
this.latch = latch;
}
@Override
public void run() {
try {
// Take while we don't exceed maxCalls, or if maxCalls is undefined (< 0)
while (callsTaken < maxCalls || maxCalls < 0) {
Schedulable res = cq.take();
String identity = uip.makeIdentity(res);
if (tag != null && this.tag.equals(identity)) {
// This call matches our tag; put it back and try again
cq.put(res);
} else {
callsTaken++;
latch.countDown();
lastResult = res;
}
}
} catch (InterruptedException e) {
return;
}
}
}
// Assert we can take exactly the numberOfTakes
public void assertCanTake(BlockingQueue<Schedulable> cq, int numberOfTakes,
int takeAttempts) throws InterruptedException {
CountDownLatch latch = new CountDownLatch(numberOfTakes);
Taker taker = new Taker(cq, takeAttempts, "default", latch);
Thread t = new Thread(taker);
t.start();
latch.await();
assertEquals(numberOfTakes, taker.callsTaken);
t.interrupt();
}
// Assert we can put exactly the numberOfPuts
public void assertCanPut(BlockingQueue<Schedulable> cq, int numberOfPuts,
int putAttempts) throws InterruptedException {
CountDownLatch latch = new CountDownLatch(numberOfPuts);
Putter putter = new Putter(cq, putAttempts, null, latch);
Thread t = new Thread(putter);
t.start();
latch.await();
assertEquals(numberOfPuts, putter.callsAdded);
t.interrupt();
}
// Make sure put will overflow into lower queues when the top is full
public void testPutOverflows() throws InterruptedException {
fcq.setScheduler(alwaysZeroScheduler);
// We can fit more than 5, even though the scheduler suggests the top queue
assertCanPut(fcq, 8, 8);
assertEquals(8, fcq.size());
}
public void testPutBlocksWhenAllFull() throws InterruptedException {
fcq.setScheduler(alwaysZeroScheduler);
assertCanPut(fcq, 10, 10); // Fill up
assertEquals(10, fcq.size());
// One more put has nowhere to go, so it blocks
assertCanPut(fcq, 0, 1); // Will block
}
public void testTakeBlocksWhenEmpty() throws InterruptedException {
fcq.setScheduler(alwaysZeroScheduler);
assertCanTake(fcq, 0, 1);
}
public void testTakeRemovesCall() throws InterruptedException {
fcq.setScheduler(alwaysZeroScheduler);
Schedulable call = mockCall("c");
fcq.offer(call);
assertEquals(call, fcq.take());
assertEquals(0, fcq.size());
}
public void testTakeTriesNextQueue() throws InterruptedException {
// Make a FCQ filled with calls in q 1 but empty in q 0
RpcScheduler q1Scheduler = mock(RpcScheduler.class);
when(q1Scheduler.getPriorityLevel(Matchers.<Schedulable>any())).thenReturn(1);
fcq.setScheduler(q1Scheduler);
// A mux which only draws from q 0
RpcMultiplexer q0mux = mock(RpcMultiplexer.class);
when(q0mux.getAndAdvanceCurrentIndex()).thenReturn(0);
fcq.setMultiplexer(q0mux);
Schedulable call = mockCall("c");
fcq.put(call);
// Take from q1 even though mux said q0, since q0 empty
assertEquals(call, fcq.take());
assertEquals(0, fcq.size());
}
}
| 12,263 | 29.431762 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
import org.apache.hadoop.ipc.TestRPC.TestProtocol;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.InterruptedIOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.nio.channels.ClosedByInterruptException;
/**
* Tests that a blocking waitForProxy() call can time out and be interrupted.
*/
public class TestRPCWaitForProxy extends Assert {
private static final String ADDRESS = "0.0.0.0";
private static final Logger
LOG = LoggerFactory.getLogger(TestRPCWaitForProxy.class);
private static final Configuration conf = new Configuration();
/**
* This tests that the time-bounded wait for a proxy operation works, and
* times out.
*
* @throws Throwable any exception other than that which was expected
*/
@Test(timeout = 10000)
public void testWaitForProxy() throws Throwable {
RpcThread worker = new RpcThread(0);
worker.start();
worker.join();
Throwable caught = worker.getCaught();
assertNotNull("No exception was raised", caught);
if (!(caught instanceof ConnectException)) {
throw caught;
}
}
/**
* This test sets off a blocking thread and then interrupts it, before
* checking that the thread was interrupted
*
* @throws Throwable any exception other than that which was expected
*/
@Test(timeout = 10000)
public void testInterruptedWaitForProxy() throws Throwable {
RpcThread worker = new RpcThread(100);
worker.start();
Thread.sleep(1000);
assertTrue("worker hasn't started", worker.waitStarted);
worker.interrupt();
worker.join();
Throwable caught = worker.getCaught();
assertNotNull("No exception was raised", caught);
// looking for the root cause here, which can be wrapped
// as part of the NetUtils work. Having this test look
// at the type of exception there would be brittle to improvements
// in exception diagnostics.
Throwable cause = caught.getCause();
if (cause == null) {
// no inner cause, use outer exception as root cause.
cause = caught;
}
if (!(cause instanceof InterruptedIOException)
&& !(cause instanceof ClosedByInterruptException)) {
throw caught;
}
}
/**
* This thread waits for a proxy for the specified timeout, and retains any
* throwable that was raised in the process
*/
private class RpcThread extends Thread {
private Throwable caught;
private int connectRetries;
private volatile boolean waitStarted = false;
private RpcThread(int connectRetries) {
this.connectRetries = connectRetries;
}
@Override
public void run() {
try {
Configuration config = new Configuration(conf);
config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
connectRetries);
config.setInt(
IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
connectRetries);
waitStarted = true;
TestProtocol proxy = RPC.waitForProxy(TestProtocol.class,
TestProtocol.versionID,
new InetSocketAddress(ADDRESS, 20),
config,
15000L);
proxy.echo("");
} catch (Throwable throwable) {
caught = throwable;
}
}
public Throwable getCaught() {
return caught;
}
}
}
| 4,294 | 31.78626 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.InetSocketAddress;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRequestProto;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureResponseProto;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.net.NetUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/** Unit test for supporting method-name based compatible RPCs. */
public class TestRPCCompatibility {
private static final String ADDRESS = "0.0.0.0";
private static InetSocketAddress addr;
private static RPC.Server server;
private ProtocolProxy<?> proxy;
public static final Log LOG =
LogFactory.getLog(TestRPCCompatibility.class);
private static Configuration conf = new Configuration();
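// Protocol evolution under test: version 0 has only ping(), version 1 adds
// echo(String), and version 2 adds echo(int) while advertising
// TestProtocol1's name to stay wire-compatible.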
public interface TestProtocol0 extends VersionedProtocol {
public static final long versionID = 0L;
void ping() throws IOException;
}
public interface TestProtocol1 extends TestProtocol0 {
String echo(String value) throws IOException;
}
// TestProtocol2 is a compatible extension of TestProtocol1 - hence it advertises that name
@ProtocolInfo(protocolName=
"org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1")
public interface TestProtocol2 extends TestProtocol1 {
int echo(int value) throws IOException;
}
public static class TestImpl0 implements TestProtocol0 {
@Override
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
return versionID;
}
@SuppressWarnings("unchecked")
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHashCode)
throws IOException {
Class<? extends VersionedProtocol> inter;
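      // Assume the first directly-implemented interface is the RPC protocol;
      // this holds for TestImpl0/1/2, which each implement exactly one.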
try {
inter = (Class<? extends VersionedProtocol>)getClass().getGenericInterfaces()[0];
} catch (Exception e) {
throw new IOException(e);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHashCode,
getProtocolVersion(protocol, clientVersion), inter);
}
@Override
public void ping() { return; }
}
public static class TestImpl1 extends TestImpl0 implements TestProtocol1 {
@Override
public String echo(String value) { return value; }
@Override
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
return TestProtocol1.versionID;
}
}
public static class TestImpl2 extends TestImpl1 implements TestProtocol2 {
@Override
public int echo(int value) { return value; }
@Override
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
return TestProtocol2.versionID;
}
}
@Before
public void setUp() {
ProtocolSignature.resetCache();
}
@After
public void tearDown() {
if (proxy != null) {
RPC.stopProxy(proxy.getProxy());
proxy = null;
}
if (server != null) {
server.stop();
server = null;
}
}
@Test // old client vs new server
public void testVersion0ClientVersion1Server() throws Exception {
// create a server with two handlers
TestImpl1 impl = new TestImpl1();
server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
.setVerbose(false).build();
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
server.start();
addr = NetUtils.getConnectAddress(server);
proxy = RPC.getProtocolProxy(
TestProtocol0.class, TestProtocol0.versionID, addr, conf);
TestProtocol0 proxy0 = (TestProtocol0)proxy.getProxy();
proxy0.ping();
}
@Test // old client vs new server
public void testVersion1ClientVersion0Server() throws Exception {
// create a server with two handlers
server = new RPC.Builder(conf).setProtocol(TestProtocol0.class)
.setInstance(new TestImpl0()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
server.start();
addr = NetUtils.getConnectAddress(server);
proxy = RPC.getProtocolProxy(
TestProtocol1.class, TestProtocol1.versionID, addr, conf);
TestProtocol1 proxy1 = (TestProtocol1)proxy.getProxy();
proxy1.ping();
try {
proxy1.echo("hello");
fail("Echo should fail");
} catch(IOException e) {
}
}
private class Version2Client {
private TestProtocol2 proxy2;
private ProtocolProxy<TestProtocol2> serverInfo;
private Version2Client() throws IOException {
serverInfo = RPC.getProtocolProxy(
TestProtocol2.class, TestProtocol2.versionID, addr, conf);
proxy2 = serverInfo.getProxy();
}
public int echo(int value) throws IOException, NumberFormatException {
if (serverInfo.isMethodSupported("echo", int.class)) {
System.out.println("echo int is supported");
        return -value; // the server supports echo(int); negate so callers can tell this path ran
} else { // server is version 2
System.out.println("echo int is NOT supported");
return Integer.parseInt(proxy2.echo(String.valueOf(value)));
}
}
public String echo(String value) throws IOException {
return proxy2.echo(value);
}
public void ping() throws IOException {
proxy2.ping();
}
}
@Test // Compatible new client & old server
public void testVersion2ClientVersion1Server() throws Exception {
// create a server with two handlers
TestImpl1 impl = new TestImpl1();
server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
.setVerbose(false).build();
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
server.start();
addr = NetUtils.getConnectAddress(server);
Version2Client client = new Version2Client();
client.ping();
assertEquals("hello", client.echo("hello"));
// echo(int) is not supported by server, so returning 3
// This verifies that echo(int) and echo(String)'s hash codes are different
assertEquals(3, client.echo(3));
}
@Test // equal version client and server
public void testVersion2ClientVersion2Server() throws Exception {
// create a server with two handlers
TestImpl2 impl = new TestImpl2();
server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
.setVerbose(false).build();
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
server.start();
addr = NetUtils.getConnectAddress(server);
Version2Client client = new Version2Client();
client.ping();
assertEquals("hello", client.echo("hello"));
// now that echo(int) is supported by the server, echo(int) should return -3
assertEquals(-3, client.echo(3));
}
public interface TestProtocol3 {
int echo(String value);
int echo(int value);
int echo_alias(int value);
int echo(int value1, int value2);
}
@Test
public void testHashCode() throws Exception {
// make sure that overriding methods have different hashcodes
Method strMethod = TestProtocol3.class.getMethod("echo", String.class);
int stringEchoHash = ProtocolSignature.getFingerprint(strMethod);
Method intMethod = TestProtocol3.class.getMethod("echo", int.class);
int intEchoHash = ProtocolSignature.getFingerprint(intMethod);
assertFalse(stringEchoHash == intEchoHash);
// make sure methods with the same signature
// from different declaring classes have the same hash code
int intEchoHash1 = ProtocolSignature.getFingerprint(
TestProtocol2.class.getMethod("echo", int.class));
assertEquals(intEchoHash, intEchoHash1);
// Methods with the same name and parameter types but different returning
// types have different hash codes
int stringEchoHash1 = ProtocolSignature.getFingerprint(
TestProtocol2.class.getMethod("echo", String.class));
assertFalse(stringEchoHash == stringEchoHash1);
// Make sure that methods with the same returning type and parameter types
// but different method names have different hash code
int intEchoHashAlias = ProtocolSignature.getFingerprint(
TestProtocol3.class.getMethod("echo_alias", int.class));
assertFalse(intEchoHash == intEchoHashAlias);
// Make sure that methods with the same returning type and method name but
// larger number of parameter types have different hash code
int intEchoHash2 = ProtocolSignature.getFingerprint(
TestProtocol3.class.getMethod("echo", int.class, int.class));
assertFalse(intEchoHash == intEchoHash2);
    // make sure that method order does not matter for the method-array hash code
int hash1 = ProtocolSignature.getFingerprint(new Method[] {intMethod, strMethod});
int hash2 = ProtocolSignature.getFingerprint(new Method[] {strMethod, intMethod});
assertEquals(hash1, hash2);
}
@ProtocolInfo(protocolName=
"org.apache.hadoop.ipc.TestRPCCompatibility$TestProtocol1")
public interface TestProtocol4 extends TestProtocol2 {
public static final long versionID = 4L;
@Override
int echo(int value) throws IOException;
}
@Test
public void testVersionMismatch() throws IOException {
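    // TestProtocol4 reuses TestProtocol1's protocol name but advertises a
    // higher versionID than the server implements, so the call must fail
    // with a version mismatch.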
server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
.setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
server.start();
addr = NetUtils.getConnectAddress(server);
TestProtocol4 proxy = RPC.getProxy(TestProtocol4.class,
TestProtocol4.versionID, addr, conf);
try {
proxy.echo(21);
fail("The call must throw VersionMismatch exception");
} catch (RemoteException ex) {
Assert.assertEquals(RPC.VersionMismatch.class.getName(),
ex.getClassName());
Assert.assertTrue(ex.getErrorCode().equals(
RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH));
} catch (IOException ex) {
fail("Expected version mismatch but got " + ex);
}
}
@Test
public void testIsMethodSupported() throws IOException {
server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
.setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
server.start();
addr = NetUtils.getConnectAddress(server);
TestProtocol2 proxy = RPC.getProxy(TestProtocol2.class,
TestProtocol2.versionID, addr, conf);
boolean supported = RpcClientUtil.isMethodSupported(proxy,
TestProtocol2.class, RPC.RpcKind.RPC_WRITABLE,
RPC.getProtocolVersion(TestProtocol2.class), "echo");
Assert.assertTrue(supported);
supported = RpcClientUtil.isMethodSupported(proxy,
TestProtocol2.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(TestProtocol2.class), "echo");
Assert.assertFalse(supported);
}
/**
* Verify that ProtocolMetaInfoServerSideTranslatorPB correctly looks up
* the server registry to extract protocol signatures and versions.
*/
@Test
public void testProtocolMetaInfoSSTranslatorPB() throws Exception {
TestImpl1 impl = new TestImpl1();
server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
.setVerbose(false).build();
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
server.start();
ProtocolMetaInfoServerSideTranslatorPB xlator =
new ProtocolMetaInfoServerSideTranslatorPB(server);
GetProtocolSignatureResponseProto resp = xlator.getProtocolSignature(
null,
createGetProtocolSigRequestProto(TestProtocol1.class,
RPC.RpcKind.RPC_PROTOCOL_BUFFER));
//No signatures should be found
Assert.assertEquals(0, resp.getProtocolSignatureCount());
resp = xlator.getProtocolSignature(
null,
createGetProtocolSigRequestProto(TestProtocol1.class,
RPC.RpcKind.RPC_WRITABLE));
Assert.assertEquals(1, resp.getProtocolSignatureCount());
ProtocolSignatureProto sig = resp.getProtocolSignatureList().get(0);
Assert.assertEquals(TestProtocol1.versionID, sig.getVersion());
boolean found = false;
int expected = ProtocolSignature.getFingerprint(TestProtocol1.class
.getMethod("echo", String.class));
for (int m : sig.getMethodsList()) {
if (expected == m) {
found = true;
break;
}
}
Assert.assertTrue(found);
}
private GetProtocolSignatureRequestProto createGetProtocolSigRequestProto(
Class<?> protocol, RPC.RpcKind rpcKind) {
GetProtocolSignatureRequestProto.Builder builder =
GetProtocolSignatureRequestProto.newBuilder();
builder.setProtocol(protocol.getName());
builder.setRpcKind(rpcKind.toString());
return builder.build();
}
}
| 14,408 | 35.571066 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URISyntaxException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyRequestProto;
import org.apache.hadoop.ipc.protobuf.TestProtos.EmptyResponseProto;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpc2Proto;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.net.NetUtils;
import org.junit.Assert;
import org.junit.Test;
import org.junit.Before;
import org.junit.After;
import com.google.protobuf.BlockingService;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
* Test for testing protocol buffer based RPC mechanism.
* This test depends on test.proto definition of types in src/test/proto
* and protobuf service definition from src/test/test_rpc_service.proto
*/
public class TestProtoBufRpc {
public final static String ADDRESS = "0.0.0.0";
public final static int PORT = 0;
private static InetSocketAddress addr;
private static Configuration conf;
private static RPC.Server server;
@ProtocolInfo(protocolName = "testProto", protocolVersion = 1)
public interface TestRpcService
extends TestProtobufRpcProto.BlockingInterface {
}
@ProtocolInfo(protocolName = "testProto2", protocolVersion = 1)
public interface TestRpcService2 extends
TestProtobufRpc2Proto.BlockingInterface {
}
public static class PBServerImpl implements TestRpcService {
@Override
public EmptyResponseProto ping(RpcController unused,
EmptyRequestProto request) throws ServiceException {
// Ensure clientId is received
byte[] clientId = Server.getClientId();
Assert.assertNotNull(Server.getClientId());
Assert.assertEquals(16, clientId.length);
return EmptyResponseProto.newBuilder().build();
}
@Override
public EchoResponseProto echo(RpcController unused, EchoRequestProto request)
throws ServiceException {
return EchoResponseProto.newBuilder().setMessage(request.getMessage())
.build();
}
@Override
public EmptyResponseProto error(RpcController unused,
EmptyRequestProto request) throws ServiceException {
throw new ServiceException("error", new RpcServerException("error"));
}
@Override
public EmptyResponseProto error2(RpcController unused,
EmptyRequestProto request) throws ServiceException {
throw new ServiceException("error", new URISyntaxException("",
"testException"));
}
}
public static class PBServer2Impl implements TestRpcService2 {
@Override
public EmptyResponseProto ping2(RpcController unused,
EmptyRequestProto request) throws ServiceException {
return EmptyResponseProto.newBuilder().build();
}
@Override
public EchoResponseProto echo2(RpcController unused, EchoRequestProto request)
throws ServiceException {
return EchoResponseProto.newBuilder().setMessage(request.getMessage())
.build();
}
}
@Before
public void setUp() throws IOException { // Setup server for both protocols
conf = new Configuration();
conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024);
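    // Cap RPC call size at 1 KB so testExtraLongRpc's 4 KB echo is rejected
    // while ordinary short messages still fit.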
// Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
// Create server side implementation
PBServerImpl serverImpl = new PBServerImpl();
BlockingService service = TestProtobufRpcProto
.newReflectiveBlockingService(serverImpl);
// Get RPC server for server side implementation
server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
.setInstance(service).setBindAddress(ADDRESS).setPort(PORT).build();
addr = NetUtils.getConnectAddress(server);
// now the second protocol
PBServer2Impl server2Impl = new PBServer2Impl();
BlockingService service2 = TestProtobufRpc2Proto
.newReflectiveBlockingService(server2Impl);
server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService2.class,
service2);
server.start();
}
@After
public void tearDown() throws Exception {
server.stop();
}
private static TestRpcService getClient() throws IOException {
// Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
return RPC.getProxy(TestRpcService.class, 0, addr, conf);
}
private static TestRpcService2 getClient2() throws IOException {
// Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(conf, TestRpcService2.class,
ProtobufRpcEngine.class);
return RPC.getProxy(TestRpcService2.class, 0, addr,
conf);
}
@Test (timeout=5000)
public void testProtoBufRpc() throws Exception {
TestRpcService client = getClient();
testProtoBufRpc(client);
}
  // Separated out so that other tests can reuse it.
public static void testProtoBufRpc(TestRpcService client) throws Exception {
// Test ping method
EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build();
client.ping(null, emptyRequest);
// Test echo method
EchoRequestProto echoRequest = EchoRequestProto.newBuilder()
.setMessage("hello").build();
EchoResponseProto echoResponse = client.echo(null, echoRequest);
Assert.assertEquals(echoResponse.getMessage(), "hello");
// Test error method - error should be thrown as RemoteException
try {
client.error(null, emptyRequest);
Assert.fail("Expected exception is not thrown");
} catch (ServiceException e) {
RemoteException re = (RemoteException)e.getCause();
RpcServerException rse = (RpcServerException) re
.unwrapRemoteException(RpcServerException.class);
Assert.assertNotNull(rse);
Assert.assertTrue(re.getErrorCode().equals(
RpcErrorCodeProto.ERROR_RPC_SERVER));
}
}
@Test (timeout=5000)
public void testProtoBufRpc2() throws Exception {
TestRpcService2 client = getClient2();
// Test ping method
EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build();
client.ping2(null, emptyRequest);
// Test echo method
EchoRequestProto echoRequest = EchoRequestProto.newBuilder()
.setMessage("hello").build();
EchoResponseProto echoResponse = client.echo2(null, echoRequest);
Assert.assertEquals(echoResponse.getMessage(), "hello");
// Ensure RPC metrics are updated
MetricsRecordBuilder rpcMetrics = getMetrics(server.getRpcMetrics().name());
assertCounterGt("RpcQueueTimeNumOps", 0L, rpcMetrics);
assertCounterGt("RpcProcessingTimeNumOps", 0L, rpcMetrics);
MetricsRecordBuilder rpcDetailedMetrics =
getMetrics(server.getRpcDetailedMetrics().name());
assertCounterGt("Echo2NumOps", 0L, rpcDetailedMetrics);
}
@Test (timeout=5000)
public void testProtoBufRandomException() throws Exception {
TestRpcService client = getClient();
EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build();
try {
client.error2(null, emptyRequest);
} catch (ServiceException se) {
Assert.assertTrue(se.getCause() instanceof RemoteException);
RemoteException re = (RemoteException) se.getCause();
Assert.assertTrue(re.getClassName().equals(
URISyntaxException.class.getName()));
Assert.assertTrue(re.getMessage().contains("testException"));
Assert.assertTrue(
re.getErrorCode().equals(RpcErrorCodeProto.ERROR_APPLICATION));
}
}
@Test(timeout=6000)
public void testExtraLongRpc() throws Exception {
TestRpcService2 client = getClient2();
final String shortString = StringUtils.repeat("X", 4);
EchoRequestProto echoRequest = EchoRequestProto.newBuilder()
.setMessage(shortString).build();
// short message goes through
EchoResponseProto echoResponse = client.echo2(null, echoRequest);
Assert.assertEquals(shortString, echoResponse.getMessage());
final String longString = StringUtils.repeat("X", 4096);
echoRequest = EchoRequestProto.newBuilder()
.setMessage(longString).build();
try {
echoResponse = client.echo2(null, echoRequest);
Assert.fail("expected extra-long RPC to fail");
} catch (ServiceException se) {
// expected
}
}
}
| 9,831 | 36.670498 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.junit.Assert.*;
import java.io.IOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
/**
* This is intended to be a set of unit tests for the
* org.apache.hadoop.ipc.Server class.
*/
public class TestServer {
@Test
public void testBind() throws Exception {
Configuration conf = new Configuration();
ServerSocket socket = new ServerSocket();
InetSocketAddress address = new InetSocketAddress("0.0.0.0",0);
socket.bind(address);
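    // Occupy a port first so the configured range [min, max] starts at a
    // port that is known to be taken, forcing Server.bind to probe upward.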
try {
int min = socket.getLocalPort();
int max = min + 100;
conf.set("TestRange", min+"-"+max);
ServerSocket socket2 = new ServerSocket();
InetSocketAddress address2 = new InetSocketAddress("0.0.0.0", 0);
Server.bind(socket2, address2, 10, conf, "TestRange");
try {
assertTrue(socket2.isBound());
assertTrue(socket2.getLocalPort() > min);
assertTrue(socket2.getLocalPort() <= max);
} finally {
socket2.close();
}
} finally {
socket.close();
}
}
@Test
public void testBindSimple() throws Exception {
ServerSocket socket = new ServerSocket();
InetSocketAddress address = new InetSocketAddress("0.0.0.0",0);
Server.bind(socket, address, 10);
try {
assertTrue(socket.isBound());
} finally {
socket.close();
}
}
@Test
public void testEmptyConfig() throws Exception {
Configuration conf = new Configuration();
conf.set("TestRange", "");
ServerSocket socket = new ServerSocket();
InetSocketAddress address = new InetSocketAddress("0.0.0.0", 0);
try {
Server.bind(socket, address, 10, conf, "TestRange");
assertTrue(socket.isBound());
} finally {
socket.close();
}
}
@Test
public void testBindError() throws Exception {
Configuration conf = new Configuration();
ServerSocket socket = new ServerSocket();
InetSocketAddress address = new InetSocketAddress("0.0.0.0",0);
socket.bind(address);
try {
int min = socket.getLocalPort();
conf.set("TestRange", min+"-"+min);
ServerSocket socket2 = new ServerSocket();
InetSocketAddress address2 = new InetSocketAddress("0.0.0.0", 0);
boolean caught = false;
try {
Server.bind(socket2, address2, 10, conf, "TestRange");
} catch (BindException e) {
caught = true;
} finally {
socket2.close();
}
assertTrue("Failed to catch the expected bind exception",caught);
} finally {
socket.close();
}
}
@Test
public void testExceptionsHandler() {
Server.ExceptionsHandler handler = new Server.ExceptionsHandler();
handler.addTerseExceptions(IOException.class);
handler.addTerseExceptions(RpcServerException.class, IpcException.class);
assertTrue(handler.isTerse(IOException.class));
assertTrue(handler.isTerse(RpcServerException.class));
assertTrue(handler.isTerse(IpcException.class));
assertFalse(handler.isTerse(RpcClientException.class));
}
}
| 3,993 | 29.257576 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.net.NetUtils;
/**
* This test provokes partial writes in the server, which is
* serving multiple clients.
*/
public class TestIPCServerResponder extends TestCase {
public static final Log LOG =
LogFactory.getLog(TestIPCServerResponder.class);
private static Configuration conf = new Configuration();
public TestIPCServerResponder(final String name) {
super(name);
}
private static final Random RANDOM = new Random();
private static final String ADDRESS = "0.0.0.0";
private static final int BYTE_COUNT = 1024;
private static final byte[] BYTES = new byte[BYTE_COUNT];
static {
for (int i = 0; i < BYTE_COUNT; i++)
BYTES[i] = (byte) ('a' + (i % 26));
}
private static class TestServer extends Server {
private boolean sleep;
public TestServer(final int handlerCount, final boolean sleep)
throws IOException {
super(ADDRESS, 0, BytesWritable.class, handlerCount, conf);
// Set the buffer size to half of the maximum parameter/result size
// to force the socket to block
this.setSocketSendBufSize(BYTE_COUNT / 2);
this.sleep = sleep;
}
@Override
public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param,
long receiveTime) throws IOException {
if (sleep) {
try {
Thread.sleep(RANDOM.nextInt(20)); // sleep a bit
} catch (InterruptedException e) {}
}
return param;
}
}
private static class Caller extends Thread {
private Client client;
private int count;
private InetSocketAddress address;
private boolean failed;
public Caller(final Client client, final InetSocketAddress address,
final int count) {
this.client = client;
this.address = address;
this.count = count;
}
@Override
public void run() {
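      // Fire off calls with random payload sizes and short pauses; together
      // with the server's halved send buffer this provokes the partial
      // writes the test is after.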
for (int i = 0; i < count; i++) {
try {
int byteSize = RANDOM.nextInt(BYTE_COUNT);
byte[] bytes = new byte[byteSize];
System.arraycopy(BYTES, 0, bytes, 0, byteSize);
Writable param = new BytesWritable(bytes);
client.call(param, address);
Thread.sleep(RANDOM.nextInt(20));
} catch (Exception e) {
LOG.fatal("Caught Exception", e);
failed = true;
}
}
}
}
public void testResponseBuffer()
throws IOException, InterruptedException {
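    // Start with a 1-byte response buffer and cap the retained buffer size
    // at 1, so each reply exercises the buffer grow (and reset) path.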
Server.INITIAL_RESP_BUF_SIZE = 1;
conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_MAX_RESPONSE_SIZE_KEY,
1);
testServerResponder(1, true, 1, 1, 5);
conf = new Configuration(); // reset configuration
}
public void testServerResponder()
throws IOException, InterruptedException {
testServerResponder(10, true, 1, 10, 200);
}
public void testServerResponder(final int handlerCount,
final boolean handlerSleep,
final int clientCount,
final int callerCount,
final int callCount) throws IOException,
InterruptedException {
Server server = new TestServer(handlerCount, handlerSleep);
server.start();
InetSocketAddress address = NetUtils.getConnectAddress(server);
Client[] clients = new Client[clientCount];
for (int i = 0; i < clientCount; i++) {
clients[i] = new Client(BytesWritable.class, conf);
}
Caller[] callers = new Caller[callerCount];
for (int i = 0; i < callerCount; i++) {
callers[i] = new Caller(clients[i % clientCount], address, callCount);
callers[i].start();
}
for (int i = 0; i < callerCount; i++) {
callers[i].join();
assertFalse(callers[i].failed);
}
for (int i = 0; i < clientCount; i++) {
clients[i].stop();
}
server.stop();
}
}
| 5,210 | 30.969325 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMiniRPCBenchmark.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Level;
import org.junit.Test;
/**
* Test {@link MiniRPCBenchmark}
*/
public class TestMiniRPCBenchmark {
@Test
public void testSimple() throws Exception {
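    // A short 10-iteration run under simple (non-Kerberos) authentication;
    // no security credentials are supplied.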
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "simple");
MiniRPCBenchmark mb = new MiniRPCBenchmark(Level.DEBUG);
mb.runMiniBenchmark(conf, 10, null, null);
}
}
| 1,288 | 34.805556 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestWeightedRoundRobinMultiplexer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.ipc.WeightedRoundRobinMultiplexer.IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY;
public class TestWeightedRoundRobinMultiplexer {
public static final Log LOG = LogFactory.getLog(TestWeightedRoundRobinMultiplexer.class);
private WeightedRoundRobinMultiplexer mux;
@Test(expected=IllegalArgumentException.class)
public void testInstantiateNegativeMux() {
mux = new WeightedRoundRobinMultiplexer(-1, "", new Configuration());
}
@Test(expected=IllegalArgumentException.class)
public void testInstantiateZeroMux() {
mux = new WeightedRoundRobinMultiplexer(0, "", new Configuration());
}
@Test(expected=IllegalArgumentException.class)
public void testInstantiateIllegalMux() {
Configuration conf = new Configuration();
conf.setStrings("namespace." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
"1", "2", "3");
// ask for 3 weights with 2 queues
mux = new WeightedRoundRobinMultiplexer(2, "namespace", conf);
}
@Test
public void testLegalInstantiation() {
Configuration conf = new Configuration();
conf.setStrings("namespace." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
"1", "2", "3");
// ask for 3 weights with 3 queues
mux = new WeightedRoundRobinMultiplexer(3, "namespace.", conf);
}
@Test
public void testDefaultPattern() {
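    // With no weights configured the mux defaults to powers of two:
    // queue i is served 2^(numQueues - 1 - i) times per round.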
// Mux of size 1: 0 0 0 0 0, etc
mux = new WeightedRoundRobinMultiplexer(1, "", new Configuration());
for(int i = 0; i < 10; i++) {
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
}
// Mux of size 2: 0 0 1 0 0 1 0 0 1, etc
mux = new WeightedRoundRobinMultiplexer(2, "", new Configuration());
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
// Size 3: 4x0 2x1 1x2, etc
mux = new WeightedRoundRobinMultiplexer(3, "", new Configuration());
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
assertEquals(mux.getAndAdvanceCurrentIndex(), 2);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
// Size 4: 8x0 4x1 2x2 1x3
mux = new WeightedRoundRobinMultiplexer(4, "", new Configuration());
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
assertEquals(mux.getAndAdvanceCurrentIndex(), 2);
assertEquals(mux.getAndAdvanceCurrentIndex(), 2);
assertEquals(mux.getAndAdvanceCurrentIndex(), 3);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
}
@Test
public void testCustomPattern() {
// 1x0 1x1
Configuration conf = new Configuration();
conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
"1", "1");
mux = new WeightedRoundRobinMultiplexer(2, "test.custom", conf);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
// 1x0 3x1 2x2
conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,
"1", "3", "2");
mux = new WeightedRoundRobinMultiplexer(3, "test.custom", conf);
for(int i = 0; i < 5; i++) {
assertEquals(mux.getAndAdvanceCurrentIndex(), 0);
assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
assertEquals(mux.getAndAdvanceCurrentIndex(), 1);
assertEquals(mux.getAndAdvanceCurrentIndex(), 2);
assertEquals(mux.getAndAdvanceCurrentIndex(), 2);
} // Ensure pattern repeats
}
}
| 5,587 | 38.352113 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.Closeable;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.ConnectException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.UTF8;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.test.MetricsAsserts;
import org.apache.hadoop.test.MockitoUtil;
import org.junit.Before;
import org.junit.Test;
import com.google.protobuf.DescriptorProtos;
import com.google.protobuf.DescriptorProtos.EnumDescriptorProto;
/** Unit tests for RPC. */
@SuppressWarnings("deprecation")
public class TestRPC {
private static final String ADDRESS = "0.0.0.0";
public static final Log LOG =
LogFactory.getLog(TestRPC.class);
private static Configuration conf;
@Before
public void setupConf() {
conf = new Configuration();
conf.setClass("rpc.engine." + StoppedProtocol.class.getName(),
StoppedRpcEngine.class, RpcEngine.class);
UserGroupInformation.setConfiguration(conf);
}
int datasize = 1024*100;
int numThreads = 50;
public interface TestProtocol extends VersionedProtocol {
public static final long versionID = 1L;
void ping() throws IOException;
void slowPing(boolean shouldSlow) throws IOException;
void sleep(long delay) throws IOException, InterruptedException;
String echo(String value) throws IOException;
String[] echo(String[] value) throws IOException;
Writable echo(Writable value) throws IOException;
int add(int v1, int v2) throws IOException;
int add(int[] values) throws IOException;
int error() throws IOException;
void testServerGet() throws IOException;
int[] exchange(int[] values) throws IOException;
DescriptorProtos.EnumDescriptorProto exchangeProto(
DescriptorProtos.EnumDescriptorProto arg);
}
public static class TestImpl implements TestProtocol {
int fastPingCounter = 0;
@Override
public long getProtocolVersion(String protocol, long clientVersion) {
return TestProtocol.versionID;
}
@Override
public ProtocolSignature getProtocolSignature(String protocol, long clientVersion,
int hashcode) {
return new ProtocolSignature(TestProtocol.versionID, null);
}
@Override
public void ping() {}
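    // slowPing(true) blocks on the object monitor until two slowPing(false)
    // calls have incremented fastPingCounter, letting tests verify that one
    // slow call does not block the other handlers.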
@Override
public synchronized void slowPing(boolean shouldSlow) {
if (shouldSlow) {
while (fastPingCounter < 2) {
try {
wait(); // slow response until two fast pings happened
} catch (InterruptedException ignored) {}
}
fastPingCounter -= 2;
} else {
fastPingCounter++;
notify();
}
}
@Override
public void sleep(long delay) throws InterruptedException {
Thread.sleep(delay);
}
@Override
public String echo(String value) throws IOException { return value; }
@Override
public String[] echo(String[] values) throws IOException { return values; }
@Override
public Writable echo(Writable writable) {
return writable;
}
@Override
public int add(int v1, int v2) {
return v1 + v2;
}
@Override
public int add(int[] values) {
int sum = 0;
for (int i = 0; i < values.length; i++) {
sum += values[i];
}
return sum;
}
@Override
public int error() throws IOException {
throw new IOException("bobo");
}
@Override
public void testServerGet() throws IOException {
if (!(Server.get() instanceof RPC.Server)) {
throw new IOException("Server.get() failed");
}
}
@Override
public int[] exchange(int[] values) {
for (int i = 0; i < values.length; i++) {
values[i] = i;
}
return values;
}
@Override
public EnumDescriptorProto exchangeProto(EnumDescriptorProto arg) {
return arg;
}
}
//
// an object that does a bunch of transactions
//
static class Transactions implements Runnable {
int datasize;
TestProtocol proxy;
Transactions(TestProtocol proxy, int datasize) {
this.proxy = proxy;
this.datasize = datasize;
}
// do two RPC that transfers data.
@Override
public void run() {
int[] indata = new int[datasize];
int[] outdata = null;
int val = 0;
try {
outdata = proxy.exchange(indata);
val = proxy.add(1,2);
} catch (IOException e) {
assertTrue("Exception from RPC exchange() " + e, false);
}
assertEquals(indata.length, outdata.length);
assertEquals(3, val);
for (int i = 0; i < outdata.length; i++) {
assertEquals(outdata[i], i);
}
}
}
//
// A class that does an RPC but does not read its response.
//
static class SlowRPC implements Runnable {
private TestProtocol proxy;
private volatile boolean done;
SlowRPC(TestProtocol proxy) {
this.proxy = proxy;
done = false;
}
boolean isDone() {
return done;
}
@Override
public void run() {
try {
proxy.slowPing(true); // this would hang until two fast pings happened
done = true;
} catch (IOException e) {
assertTrue("SlowRPC ping exception " + e, false);
}
}
}
/**
* A basic interface for testing client-side RPC resource cleanup.
*/
private static interface StoppedProtocol {
long versionID = 0;
public void stop();
}
/**
* A class used for testing cleanup of client side RPC resources.
*/
private static class StoppedRpcEngine implements RpcEngine {
@Override
public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy
) throws IOException {
return getProxy(protocol, clientVersion, addr, ticket, conf, factory,
rpcTimeout, connectionRetryPolicy, null);
}
@SuppressWarnings("unchecked")
@Override
public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
SocketFactory factory, int rpcTimeout,
RetryPolicy connectionRetryPolicy, AtomicBoolean fallbackToSimpleAuth
) throws IOException {
T proxy = (T) Proxy.newProxyInstance(protocol.getClassLoader(),
new Class[] { protocol }, new StoppedInvocationHandler());
return new ProtocolProxy<T>(protocol, proxy, false);
}
@Override
public org.apache.hadoop.ipc.RPC.Server getServer(Class<?> protocol,
Object instance, String bindAddress, int port, int numHandlers,
int numReaders, int queueSizePerHandler, boolean verbose, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager,
String portRangeConfig) throws IOException {
return null;
}
@Override
public ProtocolProxy<ProtocolMetaInfoPB> getProtocolMetaInfoProxy(
ConnectionId connId, Configuration conf, SocketFactory factory)
throws IOException {
throw new UnsupportedOperationException("This proxy is not supported");
}
}
/**
* An invocation handler which does nothing when invoking methods, and just
* counts the number of times close() is called.
*/
private static class StoppedInvocationHandler
implements InvocationHandler, Closeable {
private int closeCalled = 0;
@Override
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable {
return null;
}
@Override
public void close() throws IOException {
closeCalled++;
}
public int getCloseCalled() {
return closeCalled;
}
}
@Test
public void testConfRpc() throws IOException {
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(1).setVerbose(false).build();
// Just one handler
int confQ = conf.getInt(
CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_KEY,
CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT);
assertEquals(confQ, server.getMaxQueueSize());
int confReaders = conf.getInt(
CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY,
CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_DEFAULT);
assertEquals(confReaders, server.getNumReaders());
server.stop();
server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(1).setnumReaders(3).setQueueSizePerHandler(200)
.setVerbose(false).build();
assertEquals(3, server.getNumReaders());
assertEquals(200, server.getMaxQueueSize());
server.stop();
}
@Test
public void testProxyAddress() throws IOException {
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).build();
TestProtocol proxy = null;
try {
server.start();
InetSocketAddress addr = NetUtils.getConnectAddress(server);
// create a client
proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
assertEquals(addr, RPC.getServerAddress(proxy));
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@Test
public void testSlowRpc() throws IOException {
System.out.println("Testing Slow RPC");
// create a server with two handlers
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
TestProtocol proxy = null;
try {
server.start();
InetSocketAddress addr = NetUtils.getConnectAddress(server);
// create a client
proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
SlowRPC slowrpc = new SlowRPC(proxy);
Thread thread = new Thread(slowrpc, "SlowRPC");
thread.start(); // send a slow RPC, which won't return until two fast pings
assertTrue("Slow RPC should not have finished1.", !slowrpc.isDone());
proxy.slowPing(false); // first fast ping
// verify that the first RPC is still stuck
assertTrue("Slow RPC should not have finished2.", !slowrpc.isDone());
proxy.slowPing(false); // second fast ping
// Now the slow ping should be able to be executed
while (!slowrpc.isDone()) {
System.out.println("Waiting for slow RPC to get done.");
try {
Thread.sleep(1000);
} catch (InterruptedException e) {}
}
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
System.out.println("Down slow rpc testing");
}
}
@Test
public void testCalls() throws IOException {
testCallsInternal(conf);
}
private void testCallsInternal(Configuration conf) throws IOException {
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).build();
TestProtocol proxy = null;
try {
server.start();
InetSocketAddress addr = NetUtils.getConnectAddress(server);
proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
proxy.ping();
String stringResult = proxy.echo("foo");
assertEquals(stringResult, "foo");
stringResult = proxy.echo((String)null);
assertEquals(stringResult, null);
// Check rpcMetrics
MetricsRecordBuilder rb = getMetrics(server.rpcMetrics.name());
assertCounter("RpcProcessingTimeNumOps", 3L, rb);
assertCounterGt("SentBytes", 0L, rb);
assertCounterGt("ReceivedBytes", 0L, rb);
// Number of calls to echo method should be 2
rb = getMetrics(server.rpcDetailedMetrics.name());
assertCounter("EchoNumOps", 2L, rb);
// Number of calls to ping method should be 1
assertCounter("PingNumOps", 1L, rb);
String[] stringResults = proxy.echo(new String[]{"foo","bar"});
assertTrue(Arrays.equals(stringResults, new String[]{"foo","bar"}));
stringResults = proxy.echo((String[])null);
assertTrue(Arrays.equals(stringResults, null));
UTF8 utf8Result = (UTF8)proxy.echo(new UTF8("hello world"));
assertEquals(new UTF8("hello world"), utf8Result );
utf8Result = (UTF8)proxy.echo((UTF8)null);
assertEquals(null, utf8Result);
int intResult = proxy.add(1, 2);
assertEquals(intResult, 3);
intResult = proxy.add(new int[] {1, 2});
assertEquals(intResult, 3);
// Test protobufs
EnumDescriptorProto sendProto =
EnumDescriptorProto.newBuilder().setName("test").build();
EnumDescriptorProto retProto = proxy.exchangeProto(sendProto);
assertEquals(sendProto, retProto);
assertNotSame(sendProto, retProto);
boolean caught = false;
try {
proxy.error();
} catch (IOException e) {
if(LOG.isDebugEnabled()) {
LOG.debug("Caught " + e);
}
caught = true;
}
assertTrue(caught);
rb = getMetrics(server.rpcDetailedMetrics.name());
assertCounter("IOExceptionNumOps", 1L, rb);
proxy.testServerGet();
// create multiple threads and make them do large data transfers
System.out.println("Starting multi-threaded RPC test...");
server.setSocketSendBufSize(1024);
Thread threadId[] = new Thread[numThreads];
for (int i = 0; i < numThreads; i++) {
Transactions trans = new Transactions(proxy, datasize);
threadId[i] = new Thread(trans, "TransactionThread-" + i);
threadId[i].start();
}
// wait for all transactions to get over
System.out.println("Waiting for all threads to finish RPCs...");
for (int i = 0; i < numThreads; i++) {
try {
threadId[i].join();
} catch (InterruptedException e) {
i--; // retry
}
}
} finally {
server.stop();
if(proxy!=null) RPC.stopProxy(proxy);
}
}
@Test
public void testStandaloneClient() throws IOException {
try {
TestProtocol proxy = RPC.waitForProxy(TestProtocol.class,
TestProtocol.versionID, new InetSocketAddress(ADDRESS, 20), conf, 15000L);
proxy.echo("");
fail("We should not have reached here");
} catch (ConnectException ioe) {
//this is what we expected
}
}
private static final String ACL_CONFIG = "test.protocol.acl";
private static class TestPolicyProvider extends PolicyProvider {
@Override
public Service[] getServices() {
return new Service[] { new Service(ACL_CONFIG, TestProtocol.class) };
}
}
private void doRPCs(Configuration conf, boolean expectFailure) throws IOException {
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true).build();
server.refreshServiceAcl(conf, new TestPolicyProvider());
TestProtocol proxy = null;
server.start();
InetSocketAddress addr = NetUtils.getConnectAddress(server);
try {
proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
proxy.ping();
if (expectFailure) {
fail("Expect RPC.getProxy to fail with AuthorizationException!");
}
} catch (RemoteException e) {
if (expectFailure) {
assertEquals("RPC error code should be UNAUTHORIZED", RpcErrorCodeProto.FATAL_UNAUTHORIZED, e.getErrorCode());
assertTrue(e.unwrapRemoteException() instanceof AuthorizationException);
} else {
throw e;
}
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
MetricsRecordBuilder rb = getMetrics(server.rpcMetrics.name());
if (expectFailure) {
assertCounter("RpcAuthorizationFailures", 1L, rb);
} else {
assertCounter("RpcAuthorizationSuccesses", 1L, rb);
}
      // Since authentication is not turned on, both the authentication
      // success and failure counters should remain 0.
assertCounter("RpcAuthenticationFailures", 0L, rb);
assertCounter("RpcAuthenticationSuccesses", 0L, rb);
}
}
@Test
public void testServerAddress() throws IOException {
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true).build();
InetSocketAddress bindAddr = null;
try {
bindAddr = NetUtils.getConnectAddress(server);
} finally {
server.stop();
}
assertEquals(InetAddress.getLocalHost(), bindAddr.getAddress());
}
@Test
public void testAuthorization() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
true);
// Expect to succeed
conf.set(ACL_CONFIG, "*");
doRPCs(conf, false);
// Reset authorization to expect failure
conf.set(ACL_CONFIG, "invalid invalid");
doRPCs(conf, true);
conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2);
// Expect to succeed
conf.set(ACL_CONFIG, "*");
doRPCs(conf, false);
// Reset authorization to expect failure
conf.set(ACL_CONFIG, "invalid invalid");
doRPCs(conf, true);
}
/**
* Switch off setting socketTimeout values on RPC sockets.
* Verify that RPC calls still work ok.
*/
  @Test
  public void testNoPings() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean("ipc.client.ping", false);
new TestRPC().testCallsInternal(conf);
conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2);
new TestRPC().testCallsInternal(conf);
}
/**
* Test stopping a non-registered proxy
* @throws IOException
*/
@Test(expected=HadoopIllegalArgumentException.class)
public void testStopNonRegisteredProxy() throws IOException {
RPC.stopProxy(null);
}
/**
* Test that the mockProtocol helper returns mock proxies that can
* be stopped without error.
*/
@Test
public void testStopMockObject() throws IOException {
RPC.stopProxy(MockitoUtil.mockProtocol(TestProtocol.class));
}
@Test
public void testStopProxy() throws IOException {
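    // setupConf wired StoppedProtocol to StoppedRpcEngine, so this proxy is
    // backed by a StoppedInvocationHandler that only counts close() calls.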
StoppedProtocol proxy = RPC.getProxy(StoppedProtocol.class,
StoppedProtocol.versionID, null, conf);
StoppedInvocationHandler invocationHandler = (StoppedInvocationHandler)
Proxy.getInvocationHandler(proxy);
assertEquals(0, invocationHandler.getCloseCalled());
RPC.stopProxy(proxy);
assertEquals(1, invocationHandler.getCloseCalled());
}
@Test
public void testWrappedStopProxy() throws IOException {
StoppedProtocol wrappedProxy = RPC.getProxy(StoppedProtocol.class,
StoppedProtocol.versionID, null, conf);
StoppedInvocationHandler invocationHandler = (StoppedInvocationHandler)
Proxy.getInvocationHandler(wrappedProxy);
StoppedProtocol proxy = (StoppedProtocol) RetryProxy.create(StoppedProtocol.class,
wrappedProxy, RetryPolicies.RETRY_FOREVER);
assertEquals(0, invocationHandler.getCloseCalled());
RPC.stopProxy(proxy);
assertEquals(1, invocationHandler.getCloseCalled());
}
@Test
public void testErrorMsgForInsecureClient() throws IOException {
Configuration serverConf = new Configuration(conf);
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,
serverConf);
UserGroupInformation.setConfiguration(serverConf);
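    // The server demands Kerberos while the client below stays on simple
    // auth, so its echo() must be rejected as FATAL_UNAUTHORIZED.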
final Server server = new RPC.Builder(serverConf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true).build();
server.start();
UserGroupInformation.setConfiguration(conf);
boolean succeeded = false;
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
TestProtocol proxy = null;
try {
proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
proxy.echo("");
} catch (RemoteException e) {
LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
assertEquals("RPC error code should be UNAUTHORIZED", RpcErrorCodeProto.FATAL_UNAUTHORIZED, e.getErrorCode());
assertTrue(e.unwrapRemoteException() instanceof AccessControlException);
succeeded = true;
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
assertTrue(succeeded);
conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2);
UserGroupInformation.setConfiguration(serverConf);
final Server multiServer = new RPC.Builder(serverConf)
.setProtocol(TestProtocol.class).setInstance(new TestImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.build();
multiServer.start();
succeeded = false;
    final InetSocketAddress multiServerAddr =
        NetUtils.getConnectAddress(multiServer);
proxy = null;
try {
UserGroupInformation.setConfiguration(conf);
proxy = RPC.getProxy(TestProtocol.class,
          TestProtocol.versionID, multiServerAddr, conf);
proxy.echo("");
} catch (RemoteException e) {
LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
assertEquals("RPC error code should be UNAUTHORIZED", RpcErrorCodeProto.FATAL_UNAUTHORIZED, e.getErrorCode());
assertTrue(e.unwrapRemoteException() instanceof AccessControlException);
succeeded = true;
} finally {
multiServer.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
assertTrue(succeeded);
}
/**
* Count the number of threads that have a stack frame containing
* the given string
*/
private static int countThreads(String search) {
ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
int count = 0;
ThreadInfo[] infos = threadBean.getThreadInfo(threadBean.getAllThreadIds(), 20);
for (ThreadInfo info : infos) {
if (info == null) continue;
for (StackTraceElement elem : info.getStackTrace()) {
if (elem.getClassName().contains(search)) {
count++;
break;
}
}
}
return count;
}
/**
* Test that server.stop() properly stops all threads
*/
@Test
public void testStopsAllThreads() throws IOException, InterruptedException {
int threadsBefore = countThreads("Server$Listener$Reader");
assertEquals("Expect no Reader threads running before test",
0, threadsBefore);
final Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true).build();
server.start();
try {
// Wait for at least one reader thread to start
int threadsRunning = 0;
long totalSleepTime = 0;
do {
totalSleepTime += 10;
Thread.sleep(10);
threadsRunning = countThreads("Server$Listener$Reader");
} while (threadsRunning == 0 && totalSleepTime < 5000);
// Validate that at least one thread started (we didn't timeout)
threadsRunning = countThreads("Server$Listener$Reader");
assertTrue(threadsRunning > 0);
} finally {
server.stop();
}
int threadsAfter = countThreads("Server$Listener$Reader");
assertEquals("Expect no Reader threads left running after test",
0, threadsAfter);
}
@Test
public void testRPCBuilder() throws IOException {
// Test mandatory field conf
try {
new RPC.Builder(null).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true).build();
fail("Didn't throw HadoopIllegalArgumentException");
} catch (Exception e) {
if (!(e instanceof HadoopIllegalArgumentException)) {
fail("Expecting HadoopIllegalArgumentException but caught " + e);
}
}
// Test mandatory field protocol
try {
new RPC.Builder(conf).setInstance(new TestImpl()).setBindAddress(ADDRESS)
.setPort(0).setNumHandlers(5).setVerbose(true).build();
fail("Didn't throw HadoopIllegalArgumentException");
} catch (Exception e) {
if (!(e instanceof HadoopIllegalArgumentException)) {
fail("Expecting HadoopIllegalArgumentException but caught " + e);
}
}
// Test mandatory field instance
try {
new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5)
.setVerbose(true).build();
fail("Didn't throw HadoopIllegalArgumentException");
} catch (Exception e) {
if (!(e instanceof HadoopIllegalArgumentException)) {
fail("Expecting HadoopIllegalArgumentException but caught " + e);
}
}
}
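  // For contrast with the failure cases above, a minimal valid build
  // (an illustrative sketch, not exercised by this test) supplies all
  // three mandatory fields:
  //
  //   Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
  //       .setInstance(new TestImpl()).build();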
@Test(timeout=90000)
public void testRPCInterruptedSimple() throws IOException {
final Configuration conf = new Configuration();
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS)
.setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(null).build();
server.start();
InetSocketAddress addr = NetUtils.getConnectAddress(server);
final TestProtocol proxy = RPC.getProxy(
TestProtocol.class, TestProtocol.versionID, addr, conf);
// Connect to the server
proxy.ping();
// Interrupt self, try another call
Thread.currentThread().interrupt();
try {
proxy.ping();
fail("Interruption did not cause IPC to fail");
} catch (IOException ioe) {
if (!ioe.toString().contains("InterruptedException")) {
throw ioe;
}
// clear interrupt status for future tests
Thread.interrupted();
} finally {
server.stop();
}
}
@Test(timeout=30000)
public void testRPCInterrupted() throws IOException, InterruptedException {
final Configuration conf = new Configuration();
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS)
.setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(null).build();
server.start();
int numConcurrentRPC = 200;
InetSocketAddress addr = NetUtils.getConnectAddress(server);
final CyclicBarrier barrier = new CyclicBarrier(numConcurrentRPC);
final CountDownLatch latch = new CountDownLatch(numConcurrentRPC);
final AtomicBoolean leaderRunning = new AtomicBoolean(true);
final AtomicReference<Throwable> error = new AtomicReference<Throwable>();
Thread leaderThread = null;
for (int i = 0; i < numConcurrentRPC; i++) {
final int num = i;
final TestProtocol proxy = RPC.getProxy(
TestProtocol.class, TestProtocol.versionID, addr, conf);
Thread rpcThread = new Thread(new Runnable() {
@Override
public void run() {
try {
barrier.await();
while (num == 0 || leaderRunning.get()) {
proxy.slowPing(false);
}
proxy.slowPing(false);
} catch (Exception e) {
if (num == 0) {
leaderRunning.set(false);
} else {
error.set(e);
}
LOG.error(e);
} finally {
latch.countDown();
}
}
});
rpcThread.start();
if (leaderThread == null) {
leaderThread = rpcThread;
}
}
// let threads get past the barrier
Thread.sleep(1000);
// stop a single thread
while (leaderRunning.get()) {
leaderThread.interrupt();
}
latch.await();
// should not cause any other thread to get an error
assertTrue("rpc got exception " + error.get(), error.get() == null);
server.stop();
}
@Test
public void testConnectionPing() throws Exception {
Configuration conf = new Configuration();
int pingInterval = 50;
conf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
conf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, pingInterval);
final Server server = new RPC.Builder(conf)
.setProtocol(TestProtocol.class).setInstance(new TestImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.build();
server.start();
final TestProtocol proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, server.getListenerAddress(), conf);
try {
// this call will throw exception if server couldn't decode the ping
proxy.sleep(pingInterval*4);
} finally {
if (proxy != null) RPC.stopProxy(proxy);
server.stop();
}
}
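  // Note on testConnectionPing above: with IPC_CLIENT_PING_KEY enabled the
  // client is expected to write a ping whenever the connection has been idle
  // for IPC_PING_INTERVAL_KEY milliseconds, so sleeping for four intervals
  // should push several pings through the connection; the call returns
  // normally only if the server decodes them correctly.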
@Test
public void testRpcMetrics() throws Exception {
Configuration configuration = new Configuration();
final int interval = 1;
configuration.setBoolean(CommonConfigurationKeys.
RPC_METRICS_QUANTILE_ENABLE, true);
configuration.set(CommonConfigurationKeys.
RPC_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
final Server server = new RPC.Builder(configuration)
.setProtocol(TestProtocol.class).setInstance(new TestImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.build();
server.start();
final TestProtocol proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, server.getListenerAddress(), configuration);
try {
for (int i=0; i<1000; i++) {
proxy.ping();
proxy.echo("" + i);
}
MetricsRecordBuilder rpcMetrics =
getMetrics(server.getRpcMetrics().name());
assertTrue("Expected non-zero rpc queue time",
getLongCounter("RpcQueueTimeNumOps", rpcMetrics) > 0);
assertTrue("Expected non-zero rpc processing time",
getLongCounter("RpcProcessingTimeNumOps", rpcMetrics) > 0);
MetricsAsserts.assertQuantileGauges("RpcQueueTime" + interval + "s",
rpcMetrics);
MetricsAsserts.assertQuantileGauges("RpcProcessingTime" + interval + "s",
rpcMetrics);
} finally {
if (proxy != null) {
RPC.stopProxy(proxy);
}
server.stop();
}
}
/**
   * Verify the RPC server can shut down properly when callQueue is full.
*/
@Test (timeout=30000)
public void testRPCServerShutdown() throws Exception {
final int numClients = 3;
final List<Future<Void>> res = new ArrayList<Future<Void>>();
final ExecutorService executorService =
Executors.newFixedThreadPool(numClients);
final Configuration conf = new Configuration();
conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
final Server server = new RPC.Builder(conf)
.setProtocol(TestProtocol.class).setInstance(new TestImpl())
.setBindAddress(ADDRESS).setPort(0)
.setQueueSizePerHandler(1).setNumHandlers(1).setVerbose(true)
.build();
server.start();
final TestProtocol proxy =
RPC.getProxy(TestProtocol.class, TestProtocol.versionID,
NetUtils.getConnectAddress(server), conf);
try {
// start a sleep RPC call to consume the only handler thread.
// Start another sleep RPC call to make callQueue full.
// Start another sleep RPC call to make reader thread block on CallQueue.
for (int i = 0; i < numClients; i++) {
res.add(executorService.submit(
new Callable<Void>() {
@Override
public Void call() throws IOException, InterruptedException {
proxy.sleep(100000);
return null;
}
}));
}
while (server.getCallQueueLen() != 1
&& countThreads(CallQueueManager.class.getName()) != 1
&& countThreads(TestProtocol.class.getName()) != 1) {
Thread.sleep(100);
}
} finally {
try {
server.stop();
assertEquals("Not enough clients", numClients, res.size());
for (Future<Void> f : res) {
try {
f.get();
fail("Future get should not return");
} catch (ExecutionException e) {
assertTrue("Unexpected exception: " + e,
e.getCause() instanceof IOException);
LOG.info("Expected exception", e.getCause());
}
}
} finally {
RPC.stopProxy(proxy);
executorService.shutdown();
}
}
}
/**
* Test RPC backoff.
*/
@Test (timeout=30000)
public void testClientBackOff() throws Exception {
boolean succeeded = false;
final int numClients = 2;
final List<Future<Void>> res = new ArrayList<Future<Void>>();
final ExecutorService executorService =
Executors.newFixedThreadPool(numClients);
final Configuration conf = new Configuration();
conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
conf.setBoolean(CommonConfigurationKeys.IPC_CALLQUEUE_NAMESPACE +
".0." + CommonConfigurationKeys.IPC_BACKOFF_ENABLE, true);
final Server server = new RPC.Builder(conf)
.setProtocol(TestProtocol.class).setInstance(new TestImpl())
.setBindAddress(ADDRESS).setPort(0)
.setQueueSizePerHandler(1).setNumHandlers(1).setVerbose(true)
.build();
server.start();
final TestProtocol proxy =
RPC.getProxy(TestProtocol.class, TestProtocol.versionID,
NetUtils.getConnectAddress(server), conf);
try {
      // start a sleep RPC call to consume the only handler thread, and a
      // second one to fill the call queue.
for (int i = 0; i < numClients; i++) {
res.add(executorService.submit(
new Callable<Void>() {
@Override
public Void call() throws IOException, InterruptedException {
proxy.sleep(100000);
return null;
}
}));
}
while (server.getCallQueueLen() != 1
&& countThreads(CallQueueManager.class.getName()) != 1) {
Thread.sleep(100);
}
try {
proxy.sleep(100);
} catch (RemoteException e) {
        IOException unwrapException = e.unwrapRemoteException();
        if (unwrapException instanceof RetriableException) {
succeeded = true;
}
}
} finally {
server.stop();
RPC.stopProxy(proxy);
executorService.shutdown();
}
assertTrue("RetriableException not received", succeeded);
}
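  // A note on the backoff key above: it is scoped to a single call queue as
  // IPC_CALLQUEUE_NAMESPACE + "." + <port> + "." + IPC_BACKOFF_ENABLE, and
  // ".0." matches the port this server was configured with via setPort(0).
  // A sketch for a server configured with port 8020 (hypothetical) would be:
  //
  //   conf.setBoolean(CommonConfigurationKeys.IPC_CALLQUEUE_NAMESPACE
  //       + ".8020." + CommonConfigurationKeys.IPC_BACKOFF_ENABLE, true);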
public static void main(String[] args) throws IOException {
new TestRPC().testCallsInternal(conf);
}
}
| 38,444 | 32.517873 | 118 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.Arrays;
import org.junit.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
public class TestDecayRpcScheduler {
private Schedulable mockCall(String id) {
Schedulable mockCall = mock(Schedulable.class);
UserGroupInformation ugi = mock(UserGroupInformation.class);
when(ugi.getUserName()).thenReturn(id);
when(mockCall.getUserGroupInformation()).thenReturn(ugi);
return mockCall;
}
private DecayRpcScheduler scheduler;
@Test(expected=IllegalArgumentException.class)
public void testNegativeScheduler() {
scheduler = new DecayRpcScheduler(-1, "", new Configuration());
}
@Test(expected=IllegalArgumentException.class)
public void testZeroScheduler() {
scheduler = new DecayRpcScheduler(0, "", new Configuration());
}
@Test
public void testParsePeriod() {
// By default
scheduler = new DecayRpcScheduler(1, "", new Configuration());
assertEquals(DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_DEFAULT,
scheduler.getDecayPeriodMillis());
// Custom
Configuration conf = new Configuration();
conf.setLong("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY,
1058);
scheduler = new DecayRpcScheduler(1, "ns", conf);
assertEquals(1058L, scheduler.getDecayPeriodMillis());
}
@Test
public void testParseFactor() {
// Default
scheduler = new DecayRpcScheduler(1, "", new Configuration());
assertEquals(DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_DEFAULT,
scheduler.getDecayFactor(), 0.00001);
// Custom
Configuration conf = new Configuration();
conf.set("prefix." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY,
"0.125");
scheduler = new DecayRpcScheduler(1, "prefix", conf);
assertEquals(0.125, scheduler.getDecayFactor(), 0.00001);
}
public void assertEqualDecimalArrays(double[] a, double[] b) {
assertEquals(a.length, b.length);
for(int i = 0; i < a.length; i++) {
assertEquals(a[i], b[i], 0.00001);
}
}
@Test
public void testParseThresholds() {
// Defaults vary by number of queues
Configuration conf = new Configuration();
scheduler = new DecayRpcScheduler(1, "", conf);
assertEqualDecimalArrays(new double[]{}, scheduler.getThresholds());
scheduler = new DecayRpcScheduler(2, "", conf);
assertEqualDecimalArrays(new double[]{0.5}, scheduler.getThresholds());
scheduler = new DecayRpcScheduler(3, "", conf);
assertEqualDecimalArrays(new double[]{0.25, 0.5}, scheduler.getThresholds());
scheduler = new DecayRpcScheduler(4, "", conf);
assertEqualDecimalArrays(new double[]{0.125, 0.25, 0.5}, scheduler.getThresholds());
// Custom
conf = new Configuration();
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY,
"1, 10, 20, 50, 85");
scheduler = new DecayRpcScheduler(6, "ns", conf);
assertEqualDecimalArrays(new double[]{0.01, 0.1, 0.2, 0.5, 0.85}, scheduler.getThresholds());
}
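  // Note on the custom case above: thresholds are configured as percentages
  // ("1, 10, 20, 50, 85") and normalized to fractions of the total call
  // volume (0.01 ... 0.85); a scheduler with n queues expects n - 1
  // thresholds.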
@Test
public void testAccumulate() {
Configuration conf = new Configuration();
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY, "99999999"); // Never flush
scheduler = new DecayRpcScheduler(1, "ns", conf);
assertEquals(0, scheduler.getCallCountSnapshot().size()); // empty first
scheduler.getPriorityLevel(mockCall("A"));
assertEquals(1, scheduler.getCallCountSnapshot().get("A").longValue());
assertEquals(1, scheduler.getCallCountSnapshot().get("A").longValue());
scheduler.getPriorityLevel(mockCall("A"));
scheduler.getPriorityLevel(mockCall("B"));
scheduler.getPriorityLevel(mockCall("A"));
assertEquals(3, scheduler.getCallCountSnapshot().get("A").longValue());
assertEquals(1, scheduler.getCallCountSnapshot().get("B").longValue());
}
@Test
public void testDecay() {
Configuration conf = new Configuration();
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY, "999999999"); // Never
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY, "0.5");
scheduler = new DecayRpcScheduler(1, "ns", conf);
assertEquals(0, scheduler.getTotalCallSnapshot());
for (int i = 0; i < 4; i++) {
scheduler.getPriorityLevel(mockCall("A"));
}
for (int i = 0; i < 8; i++) {
scheduler.getPriorityLevel(mockCall("B"));
}
assertEquals(12, scheduler.getTotalCallSnapshot());
assertEquals(4, scheduler.getCallCountSnapshot().get("A").longValue());
assertEquals(8, scheduler.getCallCountSnapshot().get("B").longValue());
scheduler.forceDecay();
assertEquals(6, scheduler.getTotalCallSnapshot());
assertEquals(2, scheduler.getCallCountSnapshot().get("A").longValue());
assertEquals(4, scheduler.getCallCountSnapshot().get("B").longValue());
scheduler.forceDecay();
assertEquals(3, scheduler.getTotalCallSnapshot());
assertEquals(1, scheduler.getCallCountSnapshot().get("A").longValue());
assertEquals(2, scheduler.getCallCountSnapshot().get("B").longValue());
scheduler.forceDecay();
assertEquals(1, scheduler.getTotalCallSnapshot());
assertEquals(null, scheduler.getCallCountSnapshot().get("A"));
assertEquals(1, scheduler.getCallCountSnapshot().get("B").longValue());
scheduler.forceDecay();
assertEquals(0, scheduler.getTotalCallSnapshot());
assertEquals(null, scheduler.getCallCountSnapshot().get("A"));
assertEquals(null, scheduler.getCallCountSnapshot().get("B"));
}
@Test
public void testPriority() {
Configuration conf = new Configuration();
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY, "99999999"); // Never flush
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY,
"25, 50, 75");
scheduler = new DecayRpcScheduler(4, "ns", conf);
assertEquals(0, scheduler.getPriorityLevel(mockCall("A")));
assertEquals(2, scheduler.getPriorityLevel(mockCall("A")));
assertEquals(0, scheduler.getPriorityLevel(mockCall("B")));
assertEquals(1, scheduler.getPriorityLevel(mockCall("B")));
assertEquals(0, scheduler.getPriorityLevel(mockCall("C")));
assertEquals(0, scheduler.getPriorityLevel(mockCall("C")));
assertEquals(1, scheduler.getPriorityLevel(mockCall("A")));
assertEquals(1, scheduler.getPriorityLevel(mockCall("A")));
assertEquals(1, scheduler.getPriorityLevel(mockCall("A")));
assertEquals(2, scheduler.getPriorityLevel(mockCall("A")));
}
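  // The expected sequence above follows from each user's share of the total
  // call count measured against the 25/50/75 percent thresholds: callers
  // whose share crosses more thresholds land in higher-numbered (lower
  // priority) levels, so the heavy caller "A" drifts toward level 2 while
  // the light caller "C" stays at level 0.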
@Test(timeout=2000)
public void testPeriodic() throws InterruptedException {
Configuration conf = new Configuration();
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY, "10");
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY, "0.5");
scheduler = new DecayRpcScheduler(1, "ns", conf);
assertEquals(10, scheduler.getDecayPeriodMillis());
assertEquals(0, scheduler.getTotalCallSnapshot());
for (int i = 0; i < 64; i++) {
scheduler.getPriorityLevel(mockCall("A"));
}
// It should eventually decay to zero
while (scheduler.getTotalCallSnapshot() > 0) {
Thread.sleep(10);
}
}
}
| 8,442 | 36.524444 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRetryCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.ipc.RPC.RpcKind;
import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* Tests for {@link RetryCache}
*/
public class TestRetryCache {
private static final byte[] CLIENT_ID = ClientId.getClientId();
private static int callId = 100;
private static final Random r = new Random();
private static final TestServer testServer = new TestServer();
@Before
public void setup() {
testServer.resetCounters();
}
static class TestServer {
AtomicInteger retryCount = new AtomicInteger();
AtomicInteger operationCount = new AtomicInteger();
private RetryCache retryCache = new RetryCache("TestRetryCache", 1,
100 * 1000 * 1000 * 1000L);
/**
* A server method implemented using {@link RetryCache}.
*
   * @param input is returned back in echo, if {@code success} is true.
   * @param failureOutput returned on failure, if {@code success} is false.
   * @param methodTime time taken by the operation. By passing a smaller/larger
   *          value one can simulate an operation that takes short/long time.
   * @param success whether this operation completes successfully or not
   * @return the input parameter {@code input} if {@code success} is true,
   *         else {@code failureOutput}.
*/
int echo(int input, int failureOutput, long methodTime, boolean success)
throws InterruptedException {
CacheEntryWithPayload entry = RetryCache.waitForCompletion(retryCache,
null);
if (entry != null && entry.isSuccess()) {
System.out.println("retryCount incremented " + retryCount.get());
retryCount.incrementAndGet();
return (Integer) entry.getPayload();
}
try {
operationCount.incrementAndGet();
if (methodTime > 0) {
Thread.sleep(methodTime);
}
} finally {
RetryCache.setState(entry, success, input);
}
return success ? input : failureOutput;
}
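    // The echo method above follows the usual RetryCache idiom:
    // waitForCompletion() returns null on a first attempt, in which case the
    // caller performs the real work and records the outcome via setState();
    // a retried call instead blocks until the first attempt finishes and, on
    // success, is answered straight from the cached payload.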
void resetCounters() {
retryCount.set(0);
operationCount.set(0);
}
}
public static Server.Call newCall() {
return new Server.Call(++callId, 1, null, null,
RpcKind.RPC_PROTOCOL_BUFFER, CLIENT_ID);
}
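  // Retries are identified by the (client id, call id) pair carried in the
  // Server.Call, so the tests below hand the same call object to every
  // thread to make each invocation after the first look like a retry of the
  // same operation.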
/**
   * This simulates a long, retried server operation. Multiple threads start an
* operation that takes long time and finally succeeds. The retries in this
* case end up waiting for the current operation to complete. All the retries
* then complete based on the entry in the retry cache.
*/
@Test
public void testLongOperationsSuccessful() throws Exception {
// Test long successful operations
// There is no entry in cache expected when the first operation starts
testOperations(r.nextInt(), 100, 20, true, false, newCall());
}
/**
   * This simulates a long server operation. Multiple threads start an
* operation that takes long time and finally fails. The retries in this case
* end up waiting for the current operation to complete. All the retries end
* up performing the operation again.
*/
@Test
public void testLongOperationsFailure() throws Exception {
// Test long failed operations
// There is no entry in cache expected when the first operation starts
testOperations(r.nextInt(), 100, 20, false, false, newCall());
}
/**
   * This simulates a short server operation. Multiple threads start an
* operation that takes very short time and finally succeeds. The retries in
* this case do not wait long for the current operation to complete. All the
* retries then complete based on the entry in the retry cache.
*/
@Test
public void testShortOperationsSuccess() throws Exception {
    // Test short successful operations
    // There is no entry in cache expected when the first operation starts
    testOperations(r.nextInt(), 25, 0, true, false, newCall());
}
/**
   * This simulates a short server operation. Multiple threads start an
* operation that takes short time and finally fails. The retries in this case
* do not wait for the current operation to complete. All the retries end up
* performing the operation again.
*/
@Test
public void testShortOperationsFailure() throws Exception {
// Test long failed operations
// There is no entry in cache expected when the first operation starts
testOperations(r.nextInt(), 25, 0, false, false, newCall());
}
@Test
public void testRetryAfterSuccess() throws Exception {
// Previous operation successfully completed
Server.Call call = newCall();
int input = r.nextInt();
Server.getCurCall().set(call);
testServer.echo(input, input + 1, 5, true);
testOperations(input, 25, 0, true, true, call);
}
@Test
public void testRetryAfterFailure() throws Exception {
// Previous operation failed
Server.Call call = newCall();
int input = r.nextInt();
Server.getCurCall().set(call);
testServer.echo(input, input + 1, 5, false);
testOperations(input, 25, 0, false, true, call);
}
public void testOperations(final int input, final int numberOfThreads,
final int pause, final boolean success, final boolean attemptedBefore,
final Server.Call call) throws InterruptedException, ExecutionException {
final int failureOutput = input + 1;
ExecutorService executorService = Executors
.newFixedThreadPool(numberOfThreads);
List<Future<Integer>> list = new ArrayList<Future<Integer>>();
for (int i = 0; i < numberOfThreads; i++) {
Callable<Integer> worker = new Callable<Integer>() {
@Override
public Integer call() throws Exception {
Server.getCurCall().set(call);
Assert.assertEquals(Server.getCurCall().get(), call);
int randomPause = pause == 0 ? pause : r.nextInt(pause);
return testServer.echo(input, failureOutput, randomPause, success);
}
};
Future<Integer> submit = executorService.submit(worker);
list.add(submit);
}
Assert.assertEquals(numberOfThreads, list.size());
for (Future<Integer> future : list) {
if (success) {
Assert.assertEquals(input, future.get().intValue());
} else {
Assert.assertEquals(failureOutput, future.get().intValue());
}
}
if (success) {
// If the operation was successful, all the subsequent operations
// by other threads should be retries. Operation count should be 1.
int retries = numberOfThreads + (attemptedBefore ? 0 : -1);
Assert.assertEquals(1, testServer.operationCount.get());
Assert.assertEquals(retries, testServer.retryCount.get());
} else {
// If the operation failed, all the subsequent operations
// should execute once more, hence the retry count should be 0 and
// operation count should be the number of tries
int opCount = numberOfThreads + (attemptedBefore ? 1 : 0);
Assert.assertEquals(opCount, testServer.operationCount.get());
Assert.assertEquals(0, testServer.retryCount.get());
}
}
}
| 8,306 | 37.458333 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMappingWithDependency.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import junit.framework.TestCase;
import org.junit.Test;
public class TestScriptBasedMappingWithDependency extends TestCase {
public TestScriptBasedMappingWithDependency() {
}
@Test
public void testNoArgsMeansNoResult() {
Configuration conf = new Configuration();
conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,
ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename-1");
conf.set(ScriptBasedMappingWithDependency.DEPENDENCY_SCRIPT_FILENAME_KEY,
"any-filename-2");
conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY, 10);
ScriptBasedMappingWithDependency mapping = createMapping(conf);
List<String> names = new ArrayList<String>();
names.add("some.machine.name");
names.add("other.machine.name");
List<String> result = mapping.resolve(names);
assertNull("Expected an empty list for resolve", result);
result = mapping.getDependency("some.machine.name");
assertNull("Expected an empty list for getDependency", result);
}
@Test
public void testNoFilenameMeansSingleSwitch() throws Throwable {
Configuration conf = new Configuration();
ScriptBasedMapping mapping = createMapping(conf);
assertTrue("Expected to be single switch", mapping.isSingleSwitch());
assertTrue("Expected to be single switch",
AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
}
@Test
public void testFilenameMeansMultiSwitch() throws Throwable {
Configuration conf = new Configuration();
conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
ScriptBasedMapping mapping = createMapping(conf);
assertFalse("Expected to be multi switch", mapping.isSingleSwitch());
mapping.setConf(new Configuration());
assertTrue("Expected to be single switch", mapping.isSingleSwitch());
}
@Test
public void testNullConfig() throws Throwable {
ScriptBasedMapping mapping = createMapping(null);
assertTrue("Expected to be single switch", mapping.isSingleSwitch());
}
private ScriptBasedMappingWithDependency createMapping(Configuration conf) {
ScriptBasedMappingWithDependency mapping =
new ScriptBasedMappingWithDependency();
mapping.setConf(conf);
return mapping;
}
}
| 3,242 | 36.275862 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import org.apache.hadoop.conf.Configuration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Implements the {@link DNSToSwitchMapping} via static mappings. Used
* in testcases that simulate racks, and in the
* {@link org.apache.hadoop.hdfs.MiniDFSCluster}
*
* A shared, static mapping is used; to reset it call {@link #resetMap()}.
*
* When an instance of the class has its {@link #setConf(Configuration)}
* method called, nodes listed in the configuration will be added to the map.
* These do not get removed when the instance is garbage collected.
*
* The switch mapping policy of this class is the same as for the
* {@link ScriptBasedMapping} -the presence of a non-empty topology script.
* The script itself is not used.
*/
public class StaticMapping extends AbstractDNSToSwitchMapping {
/**
* Key to define the node mapping as a comma-delimited list of host=rack
* mappings, e.g. <code>host1=r1,host2=r1,host3=r2</code>.
* <p/>
* Value: {@value}
* <p/>
   * <b>Important: </b>spaces are not trimmed and are considered significant.
*/
public static final String KEY_HADOOP_CONFIGURED_NODE_MAPPING =
"hadoop.configured.node.mapping";
/**
* Configure the mapping by extracting any mappings defined in the
* {@link #KEY_HADOOP_CONFIGURED_NODE_MAPPING} field
* @param conf new configuration
*/
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
if (conf != null) {
String[] mappings = conf.getStrings(KEY_HADOOP_CONFIGURED_NODE_MAPPING);
if (mappings != null) {
for (String str : mappings) {
String host = str.substring(0, str.indexOf('='));
String rack = str.substring(str.indexOf('=') + 1);
addNodeToRack(host, rack);
}
}
}
}
/**
* retained lower case setter for compatibility reasons; relays to
* {@link #setConf(Configuration)}
* @param conf new configuration
*/
public void setconf(Configuration conf) {
setConf(conf);
}
/* Only one instance per JVM */
private static final Map<String, String> nameToRackMap = new HashMap<String, String>();
/**
* Add a node to the static map. The moment any entry is added to the map,
* the map goes multi-rack.
* @param name node name
* @param rackId rack ID
*/
public static void addNodeToRack(String name, String rackId) {
synchronized (nameToRackMap) {
nameToRackMap.put(name, rackId);
}
}
@Override
public List<String> resolve(List<String> names) {
List<String> m = new ArrayList<String>();
synchronized (nameToRackMap) {
for (String name : names) {
String rackId;
if ((rackId = nameToRackMap.get(name)) != null) {
m.add(rackId);
} else {
m.add(NetworkTopology.DEFAULT_RACK);
}
}
return m;
}
}
/**
* The switch policy of this mapping is driven by the same policy
* as the Scripted mapping: the presence of the script name in
* the configuration file
   * @return true iff no topology script is configured
*/
@Override
public boolean isSingleSwitch() {
return isSingleSwitchByScriptPolicy();
}
/**
* Get a copy of the map (for diagnostics)
* @return a clone of the map or null for none known
*/
@Override
public Map<String, String> getSwitchMap() {
synchronized (nameToRackMap) {
return new HashMap<String, String>(nameToRackMap);
}
}
@Override
public String toString() {
return "static mapping with single switch = " + isSingleSwitch();
}
/**
* Clear the map
*/
public static void resetMap() {
synchronized (nameToRackMap) {
nameToRackMap.clear();
}
}
  @Override
  public void reloadCachedMappings() {
// reloadCachedMappings does nothing for StaticMapping; there is
// nowhere to reload from since all data is in memory.
}
@Override
public void reloadCachedMappings(List<String> names) {
// reloadCachedMappings does nothing for StaticMapping; there is
// nowhere to reload from since all data is in memory.
}
}
| 4,951 | 29.567901 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY;
import static org.junit.Assert.assertEquals;
import com.google.common.base.Charsets;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.Test;
public class TestTableMapping {
private String hostName1 = "1.2.3.4";
private String hostName2 = "5.6.7.8";
@Test
public void testResolve() throws IOException {
File mapFile = File.createTempFile(getClass().getSimpleName() +
".testResolve", ".txt");
Files.write(hostName1 + " /rack1\n" +
hostName2 + "\t/rack2\n", mapFile, Charsets.UTF_8);
mapFile.deleteOnExit();
TableMapping mapping = new TableMapping();
Configuration conf = new Configuration();
conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile.getCanonicalPath());
mapping.setConf(conf);
List<String> names = new ArrayList<String>();
names.add(hostName1);
names.add(hostName2);
List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size());
assertEquals("/rack1", result.get(0));
assertEquals("/rack2", result.get(1));
}
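  // The mapping file written above is plain text with one
  // "<host><whitespace><rack>" entry per line; both spaces and tabs are
  // accepted as separators, which is exactly what the two entries exercise.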
@Test
public void testTableCaching() throws IOException {
File mapFile = File.createTempFile(getClass().getSimpleName() +
".testTableCaching", ".txt");
Files.write(hostName1 + " /rack1\n" +
hostName2 + "\t/rack2\n", mapFile, Charsets.UTF_8);
mapFile.deleteOnExit();
TableMapping mapping = new TableMapping();
Configuration conf = new Configuration();
conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile.getCanonicalPath());
mapping.setConf(conf);
List<String> names = new ArrayList<String>();
names.add(hostName1);
names.add(hostName2);
List<String> result1 = mapping.resolve(names);
assertEquals(names.size(), result1.size());
assertEquals("/rack1", result1.get(0));
assertEquals("/rack2", result1.get(1));
    // point the key at a bogus file; the cached mappings should still be used
    conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, "some bad value for a file");
List<String> result2 = mapping.resolve(names);
assertEquals(result1, result2);
}
@Test
public void testNoFile() {
TableMapping mapping = new TableMapping();
Configuration conf = new Configuration();
mapping.setConf(conf);
List<String> names = new ArrayList<String>();
names.add(hostName1);
names.add(hostName2);
List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size());
assertEquals(NetworkTopology.DEFAULT_RACK, result.get(0));
assertEquals(NetworkTopology.DEFAULT_RACK, result.get(1));
}
@Test
public void testFileDoesNotExist() {
TableMapping mapping = new TableMapping();
Configuration conf = new Configuration();
conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, "/this/file/does/not/exist");
mapping.setConf(conf);
List<String> names = new ArrayList<String>();
names.add(hostName1);
names.add(hostName2);
List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size());
assertEquals(result.get(0), NetworkTopology.DEFAULT_RACK);
assertEquals(result.get(1), NetworkTopology.DEFAULT_RACK);
}
@Test
public void testClearingCachedMappings() throws IOException {
File mapFile = File.createTempFile(getClass().getSimpleName() +
".testClearingCachedMappings", ".txt");
Files.write(hostName1 + " /rack1\n" +
hostName2 + "\t/rack2\n", mapFile, Charsets.UTF_8);
mapFile.deleteOnExit();
TableMapping mapping = new TableMapping();
Configuration conf = new Configuration();
conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile.getCanonicalPath());
mapping.setConf(conf);
List<String> names = new ArrayList<String>();
names.add(hostName1);
names.add(hostName2);
List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size());
assertEquals("/rack1", result.get(0));
assertEquals("/rack2", result.get(1));
Files.write("", mapFile, Charsets.UTF_8);
mapping.reloadCachedMappings();
names = new ArrayList<String>();
names.add(hostName1);
names.add(hostName2);
result = mapping.resolve(names);
assertEquals(names.size(), result.size());
assertEquals(NetworkTopology.DEFAULT_RACK, result.get(0));
assertEquals(NetworkTopology.DEFAULT_RACK, result.get(1));
}
@Test(timeout=60000)
public void testBadFile() throws IOException {
File mapFile = File.createTempFile(getClass().getSimpleName() +
".testBadFile", ".txt");
Files.write("bad contents", mapFile, Charsets.UTF_8);
mapFile.deleteOnExit();
TableMapping mapping = new TableMapping();
Configuration conf = new Configuration();
conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile.getCanonicalPath());
mapping.setConf(conf);
List<String> names = new ArrayList<String>();
names.add(hostName1);
names.add(hostName2);
List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size());
assertEquals(result.get(0), NetworkTopology.DEFAULT_RACK);
assertEquals(result.get(1), NetworkTopology.DEFAULT_RACK);
}
}
| 6,239 | 32.368984 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.io.IOException;
import java.net.ServerSocket;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
public class ServerSocketUtil {
private static final Log LOG = LogFactory.getLog(ServerSocketUtil.class);
  /**
   * Port scan & allocate is how most other apps find ports.
   *
   * @param port given port to try first
   * @param retries number of retries
   * @return a free port, either the given one or a random higher one
   * @throws IOException if no free port could be found within the retry limit
   */
public static int getPort(int port, int retries) throws IOException {
Random rand = new Random();
int tryPort = port;
int tries = 0;
while (true) {
if (tries > 0) {
tryPort = port + rand.nextInt(65535 - port);
}
LOG.info("Using port " + tryPort);
try (ServerSocket s = new ServerSocket(tryPort)) {
return tryPort;
} catch (IOException e) {
tries++;
if (tries >= retries) {
LOG.info("Port is already in use; giving up");
throw e;
} else {
LOG.info("Port is already in use; trying again");
}
}
}
}
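  // Illustrative use (hypothetical starting port): try port 50000 first,
  // then retry with random higher ports until the retry limit is reached.
  //
  //   int port = ServerSocketUtil.getPort(50000, 10);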
}
| 1,939 | 29.3125 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestStaticMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* Test the static mapping class.
* Because the map is actually static, this map needs to be reset for every test
*/
public class TestStaticMapping extends Assert {
private static final Log LOG = LogFactory.getLog(TestStaticMapping.class);
/**
* Reset the map then create a new instance of the {@link StaticMapping}
* class with a null configuration
* @return a new instance
*/
private StaticMapping newInstance() {
StaticMapping.resetMap();
return new StaticMapping();
}
/**
* Reset the map then create a new instance of the {@link StaticMapping}
* class with the topology script in the configuration set to
* the parameter
* @param script a (never executed) script, can be null
* @return a new instance
*/
private StaticMapping newInstance(String script) {
StaticMapping mapping = newInstance();
mapping.setConf(createConf(script));
return mapping;
}
/**
* Create a configuration with a specific topology script
* @param script a (never executed) script, can be null
* @return a configuration
*/
private Configuration createConf(String script) {
Configuration conf = new Configuration();
if (script != null) {
conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
script);
} else {
conf.unset(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
}
return conf;
}
private void assertSingleSwitch(DNSToSwitchMapping mapping) {
assertEquals("Expected a single switch mapping "
+ mapping,
true,
AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
}
private void assertMultiSwitch(DNSToSwitchMapping mapping) {
assertEquals("Expected a multi switch mapping "
+ mapping,
false,
AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
}
protected void assertMapSize(AbstractDNSToSwitchMapping switchMapping, int expectedSize) {
    assertEquals(
        "Expected " + expectedSize + " entries in the map "
            + switchMapping.dumpTopology(),
        expectedSize, switchMapping.getSwitchMap().size());
}
private List<String> createQueryList() {
List<String> l1 = new ArrayList<String>(2);
l1.add("n1");
l1.add("unknown");
return l1;
}
@Test
public void testStaticIsSingleSwitchOnNullScript() throws Throwable {
StaticMapping mapping = newInstance(null);
mapping.setConf(createConf(null));
assertSingleSwitch(mapping);
}
@Test
public void testStaticIsMultiSwitchOnScript() throws Throwable {
StaticMapping mapping = newInstance("ls");
assertMultiSwitch(mapping);
}
@Test
public void testAddResolveNodes() throws Throwable {
StaticMapping mapping = newInstance();
StaticMapping.addNodeToRack("n1", "/r1");
List<String> queryList = createQueryList();
List<String> resolved = mapping.resolve(queryList);
assertEquals(2, resolved.size());
assertEquals("/r1", resolved.get(0));
assertEquals(NetworkTopology.DEFAULT_RACK, resolved.get(1));
// get the switch map and examine it
Map<String, String> switchMap = mapping.getSwitchMap();
String topology = mapping.dumpTopology();
LOG.info(topology);
assertEquals(topology, 1, switchMap.size());
assertEquals(topology, "/r1", switchMap.get("n1"));
}
/**
* Verify that a configuration string builds a topology
*/
@Test
public void testReadNodesFromConfig() throws Throwable {
StaticMapping mapping = newInstance();
Configuration conf = new Configuration();
conf.set(StaticMapping.KEY_HADOOP_CONFIGURED_NODE_MAPPING, "n1=/r1,n2=/r2");
mapping.setConf(conf);
    //even though we have inserted elements into the map, the switch
    //policy is driven by the script key in the configuration, so the
    //mapping still reports itself as single rack
assertSingleSwitch(mapping);
List<String> l1 = new ArrayList<String>(3);
l1.add("n1");
l1.add("unknown");
l1.add("n2");
List<String> resolved = mapping.resolve(l1);
assertEquals(3, resolved.size());
assertEquals("/r1", resolved.get(0));
assertEquals(NetworkTopology.DEFAULT_RACK, resolved.get(1));
assertEquals("/r2", resolved.get(2));
Map<String, String> switchMap = mapping.getSwitchMap();
String topology = mapping.dumpTopology();
LOG.info(topology);
assertEquals(topology, 2, switchMap.size());
assertEquals(topology, "/r1", switchMap.get("n1"));
assertNull(topology, switchMap.get("unknown"));
}
/**
* Verify that if the inner mapping is single-switch, so is the cached one
* @throws Throwable on any problem
*/
@Test
public void testCachingRelaysSingleSwitchQueries() throws Throwable {
//create a single switch map
StaticMapping staticMapping = newInstance(null);
assertSingleSwitch(staticMapping);
CachedDNSToSwitchMapping cachedMap =
new CachedDNSToSwitchMapping(staticMapping);
LOG.info("Mapping: " + cachedMap + "\n" + cachedMap.dumpTopology());
assertSingleSwitch(cachedMap);
}
/**
* Verify that if the inner mapping is multi-switch, so is the cached one
* @throws Throwable on any problem
*/
@Test
public void testCachingRelaysMultiSwitchQueries() throws Throwable {
StaticMapping staticMapping = newInstance("top");
assertMultiSwitch(staticMapping);
CachedDNSToSwitchMapping cachedMap =
new CachedDNSToSwitchMapping(staticMapping);
LOG.info("Mapping: " + cachedMap + "\n" + cachedMap.dumpTopology());
assertMultiSwitch(cachedMap);
}
/**
   * This test verifies that resolution queries get relayed to the inner mapping
* @throws Throwable on any problem
*/
@Test
public void testCachingRelaysResolveQueries() throws Throwable {
StaticMapping mapping = newInstance();
mapping.setConf(createConf("top"));
StaticMapping staticMapping = mapping;
CachedDNSToSwitchMapping cachedMap =
new CachedDNSToSwitchMapping(staticMapping);
assertMapSize(cachedMap, 0);
//add a node to the static map
StaticMapping.addNodeToRack("n1", "/r1");
//verify it is there
assertMapSize(staticMapping, 1);
//verify that the cache hasn't picked it up yet
assertMapSize(cachedMap, 0);
//now relay the query
cachedMap.resolve(createQueryList());
//and verify the cache is no longer empty
assertMapSize(cachedMap, 2);
}
/**
   * This test verifies that lookups that miss (default-rack results) are
   * still cached
* @throws Throwable on any problem
*/
@Test
public void testCachingCachesNegativeEntries() throws Throwable {
StaticMapping staticMapping = newInstance();
CachedDNSToSwitchMapping cachedMap =
new CachedDNSToSwitchMapping(staticMapping);
assertMapSize(cachedMap, 0);
assertMapSize(staticMapping, 0);
List<String> resolved = cachedMap.resolve(createQueryList());
//and verify the cache is no longer empty while the static map is
assertMapSize(staticMapping, 0);
assertMapSize(cachedMap, 2);
}
}
| 8,211 | 33.074689 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestDNS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.net.UnknownHostException;
import java.net.InetAddress;
import javax.naming.NameNotFoundException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Test host name and IP resolution and caching.
*/
public class TestDNS {
private static final Log LOG = LogFactory.getLog(TestDNS.class);
private static final String DEFAULT = "default";
/**
* Test that asking for the default hostname works
* @throws Exception if hostname lookups fail
*/
@Test
public void testGetLocalHost() throws Exception {
String hostname = DNS.getDefaultHost(DEFAULT);
assertNotNull(hostname);
}
/**
* Test that repeated calls to getting the local host are fairly fast, and
* hence that caching is being used
* @throws Exception if hostname lookups fail
*/
@Test
public void testGetLocalHostIsFast() throws Exception {
String hostname1 = DNS.getDefaultHost(DEFAULT);
assertNotNull(hostname1);
String hostname2 = DNS.getDefaultHost(DEFAULT);
long t1 = Time.now();
String hostname3 = DNS.getDefaultHost(DEFAULT);
long t2 = Time.now();
assertEquals(hostname3, hostname2);
assertEquals(hostname2, hostname1);
long interval = t2 - t1;
assertTrue(
"Took too long to determine local host - caching is not working",
interval < 20000);
}
/**
* Test that our local IP address is not null
* @throws Exception if something went wrong
*/
@Test
public void testLocalHostHasAnAddress() throws Exception {
assertNotNull(getLocalIPAddr());
}
private InetAddress getLocalIPAddr() throws UnknownHostException {
String hostname = DNS.getDefaultHost(DEFAULT);
InetAddress localhost = InetAddress.getByName(hostname);
return localhost;
}
/**
* Test null interface name
*/
@Test
public void testNullInterface() throws Exception {
try {
String host = DNS.getDefaultHost(null);
fail("Expected a NullPointerException, got " + host);
} catch (NullPointerException npe) {
// Expected
}
try {
String ip = DNS.getDefaultIP(null);
fail("Expected a NullPointerException, got " + ip);
} catch (NullPointerException npe) {
// Expected
}
}
/**
* Get the IP addresses of an unknown interface
*/
@Test
public void testIPsOfUnknownInterface() throws Exception {
try {
DNS.getIPs("name-of-an-unknown-interface");
fail("Got an IP for a bogus interface");
} catch (UnknownHostException e) {
assertEquals("No such interface name-of-an-unknown-interface",
e.getMessage());
}
}
/**
* Test the "default" IP addresses is the local IP addr
*/
@Test
public void testGetIPWithDefault() throws Exception {
String[] ips = DNS.getIPs(DEFAULT);
assertEquals("Should only return 1 default IP", 1, ips.length);
assertEquals(getLocalIPAddr().getHostAddress(), ips[0].toString());
String ip = DNS.getDefaultIP(DEFAULT);
assertEquals(ip, ips[0].toString());
}
/**
* TestCase: get our local address and reverse look it up
*/
@Test
public void testRDNS() throws Exception {
InetAddress localhost = getLocalIPAddr();
try {
String s = DNS.reverseDns(localhost, null);
LOG.info("Local revers DNS hostname is " + s);
} catch (NameNotFoundException e) {
if (!localhost.isLinkLocalAddress() || localhost.isLoopbackAddress()) {
//these addresses probably won't work with rDNS anyway, unless someone
//has unusual entries in their DNS server mapping 1.0.0.127 to localhost
LOG.info("Reverse DNS failing as due to incomplete networking", e);
LOG.info("Address is " + localhost
+ " Loopback=" + localhost.isLoopbackAddress()
+ " Linklocal=" + localhost.isLinkLocalAddress());
}
}
}
/**
* Test that the name "localhost" resolves to something.
*
* If this fails, your machine's network is in a mess, go edit /etc/hosts
*/
@Test
public void testLocalhostResolves() throws Exception {
InetAddress localhost = InetAddress.getByName("localhost");
assertNotNull("localhost is null", localhost);
LOG.info("Localhost IPAddr is " + localhost.toString());
}
}
| 5,206 | 30.36747 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import org.apache.commons.math3.stat.inference.ChiSquareTest;
import org.junit.Assert;
import org.junit.Test;
public class TestClusterTopology extends Assert {
public static class NodeElement implements Node {
private String location;
private String name;
private Node parent;
private int level;
public NodeElement(String name) {
this.name = name;
}
@Override
public String getNetworkLocation() {
return location;
}
@Override
public void setNetworkLocation(String location) {
this.location = location;
}
@Override
public String getName() {
return name;
}
@Override
public Node getParent() {
return parent;
}
@Override
public void setParent(Node parent) {
this.parent = parent;
}
@Override
public int getLevel() {
return level;
}
@Override
public void setLevel(int i) {
this.level = i;
}
}
/**
* Test the count of nodes with exclude list
*/
@Test
public void testCountNumNodes() throws Exception {
// create the topology
NetworkTopology cluster = new NetworkTopology();
NodeElement node1 = getNewNode("node1", "/d1/r1");
cluster.add(node1);
NodeElement node2 = getNewNode("node2", "/d1/r2");
cluster.add(node2);
NodeElement node3 = getNewNode("node3", "/d1/r3");
cluster.add(node3);
NodeElement node4 = getNewNode("node4", "/d1/r4");
cluster.add(node4);
// create exclude list
List<Node> excludedNodes = new ArrayList<Node>();
assertEquals("4 nodes should be available", 4,
cluster.countNumOfAvailableNodes(NodeBase.ROOT, excludedNodes));
NodeElement deadNode = getNewNode("node5", "/d1/r2");
excludedNodes.add(deadNode);
assertEquals("4 nodes should be available with extra excluded Node", 4,
cluster.countNumOfAvailableNodes(NodeBase.ROOT, excludedNodes));
// add one existing node to exclude list
excludedNodes.add(node4);
assertEquals("excluded nodes with ROOT scope should be considered", 3,
cluster.countNumOfAvailableNodes(NodeBase.ROOT, excludedNodes));
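    // A scope prefixed with "~" is an excluded scope: "~/d1/r2" counts the
    // available nodes everywhere except rack /d1/r2.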
assertEquals("excluded nodes without ~ scope should be considered", 2,
cluster.countNumOfAvailableNodes("~" + deadNode.getNetworkLocation(),
excludedNodes));
assertEquals("excluded nodes with rack scope should be considered", 1,
cluster.countNumOfAvailableNodes(deadNode.getNetworkLocation(),
excludedNodes));
// adding the node in excluded scope to excluded list
excludedNodes.add(node2);
assertEquals("excluded nodes with ~ scope should be considered", 2,
cluster.countNumOfAvailableNodes("~" + deadNode.getNetworkLocation(),
excludedNodes));
// getting count with non-exist scope.
assertEquals("No nodes should be considered for non-exist scope", 0,
cluster.countNumOfAvailableNodes("/non-exist", excludedNodes));
// remove a node from the cluster
cluster.remove(node1);
assertEquals("1 node should be available", 1,
cluster.countNumOfAvailableNodes(NodeBase.ROOT, excludedNodes));
}
/**
* Test how well we pick random nodes.
*/
@Test
public void testChooseRandom() {
// create the topology
NetworkTopology cluster = new NetworkTopology();
NodeElement node1 = getNewNode("node1", "/d1/r1");
cluster.add(node1);
NodeElement node2 = getNewNode("node2", "/d1/r2");
cluster.add(node2);
NodeElement node3 = getNewNode("node3", "/d1/r3");
cluster.add(node3);
NodeElement node4 = getNewNode("node4", "/d1/r3");
cluster.add(node4);
// Number of iterations to do the test
int numIterations = 100;
// Pick random nodes
HashMap<String,Integer> histogram = new HashMap<String,Integer>();
for (int i=0; i<numIterations; i++) {
String randomNode = cluster.chooseRandom(NodeBase.ROOT).getName();
if (!histogram.containsKey(randomNode)) {
histogram.put(randomNode, 0);
}
histogram.put(randomNode, histogram.get(randomNode) + 1);
}
assertEquals("Random is not selecting all nodes", 4, histogram.size());
    // Check with 99% confidence (alpha = 0.01, since confidence = 100 * (1 - alpha))
ChiSquareTest chiSquareTest = new ChiSquareTest();
double[] expected = new double[histogram.size()];
long[] observed = new long[histogram.size()];
int j=0;
for (Integer occurrence : histogram.values()) {
expected[j] = 1.0 * numIterations / histogram.size();
observed[j] = occurrence;
j++;
}
boolean chiSquareTestRejected =
chiSquareTest.chiSquareTest(expected, observed, 0.01);
// Check that they have the proper distribution
assertFalse("Not choosing nodes randomly", chiSquareTestRejected);
// Pick random nodes excluding the 2 nodes in /d1/r3
histogram = new HashMap<String,Integer>();
for (int i=0; i<numIterations; i++) {
String randomNode = cluster.chooseRandom("~/d1/r3").getName();
if (!histogram.containsKey(randomNode)) {
histogram.put(randomNode, 0);
}
histogram.put(randomNode, histogram.get(randomNode) + 1);
}
assertEquals("Random is not selecting the nodes it should",
2, histogram.size());
}
private NodeElement getNewNode(String name, String rackLocation) {
NodeElement node = new NodeElement(name);
node.setNetworkLocation(rackLocation);
return node;
}
}
| 6,383 | 32.6 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestScriptBasedMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import junit.framework.TestCase;
import org.junit.Test;
public class TestScriptBasedMapping extends TestCase {
public TestScriptBasedMapping() {
}
@Test
public void testNoArgsMeansNoResult() {
Configuration conf = new Configuration();
conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,
ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
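    // An argument count below MIN_ALLOWABLE_ARGS means the script cannot be
    // invoked, so resolve() is expected to return null.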
    conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
ScriptBasedMapping mapping = createMapping(conf);
List<String> names = new ArrayList<String>();
names.add("some.machine.name");
names.add("other.machine.name");
List<String> result = mapping.resolve(names);
assertNull("Expected an empty list", result);
}
@Test
public void testNoFilenameMeansSingleSwitch() throws Throwable {
Configuration conf = new Configuration();
ScriptBasedMapping mapping = createMapping(conf);
assertTrue("Expected to be single switch", mapping.isSingleSwitch());
assertTrue("Expected to be single switch",
AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
}
@Test
public void testFilenameMeansMultiSwitch() throws Throwable {
Configuration conf = new Configuration();
conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
ScriptBasedMapping mapping = createMapping(conf);
assertFalse("Expected to be multi switch", mapping.isSingleSwitch());
mapping.setConf(new Configuration());
assertTrue("Expected to be single switch", mapping.isSingleSwitch());
}
@Test
public void testNullConfig() throws Throwable {
ScriptBasedMapping mapping = createMapping(null);
assertTrue("Expected to be single switch", mapping.isSingleSwitch());
}
private ScriptBasedMapping createMapping(Configuration conf) {
ScriptBasedMapping mapping = new ScriptBasedMapping();
mapping.setConf(conf);
return mapping;
}
}
| 2,912 | 34.52439 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSwitchMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
/**
* Test some other details of the switch mapping
*/
public class TestSwitchMapping extends Assert {
/**
* Verify the switch mapping query handles arbitrary DNSToSwitchMapping
* implementations
*
* @throws Throwable on any problem
*/
@Test
public void testStandaloneClassesAssumedMultiswitch() throws Throwable {
DNSToSwitchMapping mapping = new StandaloneSwitchMapping();
assertFalse("Expected to be multi switch " + mapping,
AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
}
/**
* Verify the cached mapper delegates the switch mapping query to the inner
* mapping, which again handles arbitrary DNSToSwitchMapping implementations
*
* @throws Throwable on any problem
*/
@Test
public void testCachingRelays() throws Throwable {
CachedDNSToSwitchMapping mapping =
new CachedDNSToSwitchMapping(new StandaloneSwitchMapping());
assertFalse("Expected to be multi switch " + mapping,
mapping.isSingleSwitch());
}
/**
   * Verify the cached mapper relays toString() to the inner script-based
   * mapping, so details such as the script name appear in diagnostics
*
* @throws Throwable on any problem
*/
@Test
public void testCachingRelaysStringOperations() throws Throwable {
Configuration conf = new Configuration();
String scriptname = "mappingscript.sh";
conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
scriptname);
ScriptBasedMapping scriptMapping = new ScriptBasedMapping(conf);
assertTrue("Did not find " + scriptname + " in " + scriptMapping,
scriptMapping.toString().contains(scriptname));
CachedDNSToSwitchMapping mapping =
new CachedDNSToSwitchMapping(scriptMapping);
assertTrue("Did not find " + scriptname + " in " + mapping,
mapping.toString().contains(scriptname));
}
/**
   * Verify the cached mapper relays toString() to the inner mapping even
   * when no script has been configured
*
* @throws Throwable on any problem
*/
@Test
public void testCachingRelaysStringOperationsToNullScript() throws Throwable {
Configuration conf = new Configuration();
ScriptBasedMapping scriptMapping = new ScriptBasedMapping(conf);
assertTrue("Did not find " + ScriptBasedMapping.NO_SCRIPT
+ " in " + scriptMapping,
scriptMapping.toString().contains(ScriptBasedMapping.NO_SCRIPT));
CachedDNSToSwitchMapping mapping =
new CachedDNSToSwitchMapping(scriptMapping);
assertTrue("Did not find " + ScriptBasedMapping.NO_SCRIPT
+ " in " + mapping,
mapping.toString().contains(ScriptBasedMapping.NO_SCRIPT));
}
@Test
public void testNullMapping() {
assertFalse(AbstractDNSToSwitchMapping.isMappingSingleSwitch(null));
}
/**
* This class does not extend the abstract switch mapping, and verifies that
* the switch mapping logic assumes that this is multi switch
*/
private static class StandaloneSwitchMapping implements DNSToSwitchMapping {
@Override
public List<String> resolve(List<String> names) {
return names;
}
@Override
public void reloadCachedMappings() {
}
@Override
public void reloadCachedMappings(List<String> names) {
}
}
}
| 4,414 | 33.224806 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.net.SocketTimeoutException;
import java.nio.channels.Pipe;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MultithreadedTestUtil;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * This tests timeouts from SocketInputStream and
* SocketOutputStream using pipes.
*
* Normal read and write using these streams are tested by pretty much
* every DFS unit test.
*/
public class TestSocketIOWithTimeout {
static Log LOG = LogFactory.getLog(TestSocketIOWithTimeout.class);
private static int TIMEOUT = 1*1000;
private static String TEST_STRING = "1234567890";
private MultithreadedTestUtil.TestContext ctx = new TestContext();
private static final int PAGE_SIZE = (int) NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
private void doIO(InputStream in, OutputStream out,
int expectedTimeout) throws IOException {
    /* Keep on writing or reading until we get SocketTimeoutException.
     * The exception is expected to occur within
     * TestNetUtils.TIME_FUDGE_MILLIS of the expected timeout.
     */
byte buf[] = new byte[PAGE_SIZE + 19];
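    // Size the buffer slightly larger than one OS page so writes quickly
    // fill the pipe and block, exercising the timeout path.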
while (true) {
long start = Time.now();
try {
if (in != null) {
in.read(buf);
} else {
out.write(buf);
}
} catch (SocketTimeoutException e) {
long diff = Time.now() - start;
LOG.info("Got SocketTimeoutException as expected after " +
diff + " millis : " + e.getMessage());
assertTrue(Math.abs(expectedTimeout - diff) <=
TestNetUtils.TIME_FUDGE_MILLIS);
break;
}
}
}
@Test
public void testSocketIOWithTimeout() throws Exception {
// first open pipe:
Pipe pipe = Pipe.open();
Pipe.SourceChannel source = pipe.source();
Pipe.SinkChannel sink = pipe.sink();
try {
final InputStream in = new SocketInputStream(source, TIMEOUT);
OutputStream out = new SocketOutputStream(sink, TIMEOUT);
byte[] writeBytes = TEST_STRING.getBytes();
byte[] readBytes = new byte[writeBytes.length];
byte byteWithHighBit = (byte)0x80;
out.write(writeBytes);
out.write(byteWithHighBit);
doIO(null, out, TIMEOUT);
in.read(readBytes);
assertTrue(Arrays.equals(writeBytes, readBytes));
assertEquals(byteWithHighBit & 0xff, in.read());
doIO(in, null, TIMEOUT);
// Change timeout on the read side.
((SocketInputStream)in).setTimeout(TIMEOUT * 2);
doIO(in, null, TIMEOUT * 2);
/*
* Verify that it handles interrupted threads properly.
* Use a large timeout and expect the thread to return quickly
* upon interruption.
*/
((SocketInputStream)in).setTimeout(0);
TestingThread thread = new TestingThread(ctx) {
@Override
public void doWork() throws Exception {
try {
in.read();
fail("Did not fail with interrupt");
} catch (InterruptedIOException ste) {
LOG.info("Got expection while reading as expected : " +
ste.getMessage());
}
}
};
ctx.addThread(thread);
ctx.startThreads();
// If the thread is interrupted before it calls read()
// then it throws ClosedByInterruptException due to
// some Java quirk. Waiting for it to call read()
// gets it into select(), so we get the expected
// InterruptedIOException.
Thread.sleep(1000);
thread.interrupt();
ctx.stop();
//make sure the channels are still open
assertTrue(source.isOpen());
assertTrue(sink.isOpen());
// Nevertheless, the output stream is closed, because
// a partial write may have succeeded (see comment in
// SocketOutputStream#write(byte[]), int, int)
// This portion of the test cannot pass on Windows due to differences in
// behavior of partial writes. Windows appears to buffer large amounts of
// written data and send it all atomically, thus making it impossible to
// simulate a partial write scenario. Attempts were made to switch the
// test from using a pipe to a network socket and also to use larger and
// larger buffers in doIO. Nothing helped the situation though.
if (!Shell.WINDOWS) {
try {
out.write(1);
fail("Did not throw");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"stream is closed", ioe);
}
}
out.close();
assertFalse(sink.isOpen());
// close sink and expect -1 from source.read()
assertEquals(-1, in.read());
// make sure close() closes the underlying channel.
in.close();
assertFalse(source.isOpen());
} finally {
if (source != null) {
source.close();
}
if (sink != null) {
sink.close();
}
}
}
}
| 6,326 | 32.834225 | 111 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import static org.junit.Assert.*;
import java.io.EOFException;
import java.io.IOException;
import java.net.BindException;
import java.net.ConnectException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.List;
import java.util.concurrent.TimeUnit;
import junit.framework.AssertionFailedError;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.NetUtilsTestResolver;
import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestNetUtils {
private static final Log LOG = LogFactory.getLog(TestNetUtils.class);
private static final int DEST_PORT = 4040;
private static final String DEST_PORT_NAME = Integer.toString(DEST_PORT);
private static final int LOCAL_PORT = 8080;
private static final String LOCAL_PORT_NAME = Integer.toString(LOCAL_PORT);
/**
* Some slop around expected times when making sure timeouts behave
* as expected. We assume that they will be accurate to within
* this threshold.
*/
static final long TIME_FUDGE_MILLIS = 200;
/**
* Test that we can't accidentally connect back to the connecting socket due
* to a quirk in the TCP spec.
*
* This is a regression test for HADOOP-6722.
*/
@Test
public void testAvoidLoopbackTcpSockets() throws Exception {
Configuration conf = new Configuration();
Socket socket = NetUtils.getDefaultSocketFactory(conf)
.createSocket();
socket.bind(new InetSocketAddress("127.0.0.1", 0));
System.err.println("local address: " + socket.getLocalAddress());
System.err.println("local port: " + socket.getLocalPort());
try {
NetUtils.connect(socket,
new InetSocketAddress(socket.getLocalAddress(), socket.getLocalPort()),
20000);
socket.close();
fail("Should not have connected");
} catch (ConnectException ce) {
System.err.println("Got exception: " + ce);
assertTrue(ce.getMessage().contains("resulted in a loopback"));
} catch (SocketException se) {
// Some TCP stacks will actually throw their own Invalid argument exception
// here. This is also OK.
assertTrue(se.getMessage().contains("Invalid argument"));
}
}
@Test
public void testSocketReadTimeoutWithChannel() throws Exception {
doSocketReadTimeoutTest(true);
}
@Test
public void testSocketReadTimeoutWithoutChannel() throws Exception {
doSocketReadTimeoutTest(false);
}
private void doSocketReadTimeoutTest(boolean withChannel)
throws IOException {
// Binding a ServerSocket is enough to accept connections.
// Rely on the backlog to accept for us.
ServerSocket ss = new ServerSocket(0);
Socket s;
if (withChannel) {
s = NetUtils.getDefaultSocketFactory(new Configuration())
.createSocket();
Assume.assumeNotNull(s.getChannel());
} else {
s = new Socket();
assertNull(s.getChannel());
}
SocketInputWrapper stm = null;
try {
NetUtils.connect(s, ss.getLocalSocketAddress(), 1000);
stm = NetUtils.getInputStream(s, 1000);
assertReadTimeout(stm, 1000);
// Change timeout, make sure it applies.
stm.setTimeout(1);
assertReadTimeout(stm, 1);
// If there is a channel, then setting the socket timeout
// should not matter. If there is not a channel, it will
// take effect.
s.setSoTimeout(1000);
if (withChannel) {
assertReadTimeout(stm, 1);
} else {
assertReadTimeout(stm, 1000);
}
} finally {
IOUtils.closeStream(stm);
IOUtils.closeSocket(s);
ss.close();
}
}
private void assertReadTimeout(SocketInputWrapper stm, int timeoutMillis)
throws IOException {
long st = System.nanoTime();
try {
stm.read();
fail("Didn't time out");
} catch (SocketTimeoutException ste) {
assertTimeSince(st, timeoutMillis);
}
}
private void assertTimeSince(long startNanos, int expectedMillis) {
long durationNano = System.nanoTime() - startNanos;
long millis = TimeUnit.MILLISECONDS.convert(
durationNano, TimeUnit.NANOSECONDS);
assertTrue("Expected " + expectedMillis + "ms, but took " + millis,
Math.abs(millis - expectedMillis) < TIME_FUDGE_MILLIS);
}
/**
   * Test for {@link NetUtils#getLocalInetAddress(String)}.
   *
   * @throws Exception on any problem
*/
@Test
public void testGetLocalInetAddress() throws Exception {
assertNotNull(NetUtils.getLocalInetAddress("127.0.0.1"));
assertNull(NetUtils.getLocalInetAddress("invalid-address-for-test"));
assertNull(NetUtils.getLocalInetAddress(null));
}
@Test(expected=UnknownHostException.class)
public void testVerifyHostnamesException() throws UnknownHostException {
String[] names = {"valid.host.com", "1.com", "invalid host here"};
NetUtils.verifyHostnames(names);
}
@Test
public void testVerifyHostnamesNoException() {
String[] names = {"valid.host.com", "1.com"};
try {
NetUtils.verifyHostnames(names);
} catch (UnknownHostException e) {
fail("NetUtils.verifyHostnames threw unexpected UnknownHostException");
}
}
/**
* Test for {@link NetUtils#isLocalAddress(java.net.InetAddress)}
*/
@Test
public void testIsLocalAddress() throws Exception {
// Test - local host is local address
assertTrue(NetUtils.isLocalAddress(InetAddress.getLocalHost()));
// Test - all addresses bound network interface is local address
Enumeration<NetworkInterface> interfaces = NetworkInterface
.getNetworkInterfaces();
if (interfaces != null) { // Iterate through all network interfaces
while (interfaces.hasMoreElements()) {
NetworkInterface i = interfaces.nextElement();
Enumeration<InetAddress> addrs = i.getInetAddresses();
if (addrs == null) {
continue;
}
// Iterate through all the addresses of a network interface
while (addrs.hasMoreElements()) {
InetAddress addr = addrs.nextElement();
assertTrue(NetUtils.isLocalAddress(addr));
}
}
}
assertFalse(NetUtils.isLocalAddress(InetAddress.getByName("8.8.8.8")));
}
@Test
public void testWrapConnectException() throws Throwable {
IOException e = new ConnectException("failed");
IOException wrapped = verifyExceptionClass(e, ConnectException.class);
assertInException(wrapped, "failed");
assertWikified(wrapped);
assertInException(wrapped, "localhost");
assertRemoteDetailsIncluded(wrapped);
assertInException(wrapped, "/ConnectionRefused");
}
@Test
public void testWrapBindException() throws Throwable {
IOException e = new BindException("failed");
IOException wrapped = verifyExceptionClass(e, BindException.class);
assertInException(wrapped, "failed");
assertLocalDetailsIncluded(wrapped);
assertNotInException(wrapped, DEST_PORT_NAME);
assertInException(wrapped, "/BindException");
}
@Test
public void testWrapUnknownHostException() throws Throwable {
IOException e = new UnknownHostException("failed");
IOException wrapped = verifyExceptionClass(e, UnknownHostException.class);
assertInException(wrapped, "failed");
assertWikified(wrapped);
assertInException(wrapped, "localhost");
assertRemoteDetailsIncluded(wrapped);
assertInException(wrapped, "/UnknownHost");
}
@Test
public void testWrapEOFException() throws Throwable {
IOException e = new EOFException("eof");
IOException wrapped = verifyExceptionClass(e, EOFException.class);
assertInException(wrapped, "eof");
assertWikified(wrapped);
assertInException(wrapped, "localhost");
assertRemoteDetailsIncluded(wrapped);
assertInException(wrapped, "/EOFException");
}
@Test
public void testGetConnectAddress() throws IOException {
NetUtils.addStaticResolution("host", "127.0.0.1");
InetSocketAddress addr = NetUtils.createSocketAddrForHost("host", 1);
InetSocketAddress connectAddr = NetUtils.getConnectAddress(addr);
assertEquals(addr.getHostName(), connectAddr.getHostName());
addr = new InetSocketAddress(1);
connectAddr = NetUtils.getConnectAddress(addr);
assertEquals(InetAddress.getLocalHost().getHostName(),
connectAddr.getHostName());
}
@Test
public void testCreateSocketAddress() throws Throwable {
InetSocketAddress addr = NetUtils.createSocketAddr(
"127.0.0.1:12345", 1000, "myconfig");
assertEquals("127.0.0.1", addr.getAddress().getHostAddress());
assertEquals(12345, addr.getPort());
addr = NetUtils.createSocketAddr(
"127.0.0.1", 1000, "myconfig");
assertEquals("127.0.0.1", addr.getAddress().getHostAddress());
assertEquals(1000, addr.getPort());
try {
addr = NetUtils.createSocketAddr(
"127.0.0.1:blahblah", 1000, "myconfig");
fail("Should have failed to parse bad port");
} catch (IllegalArgumentException iae) {
assertInException(iae, "myconfig");
}
}
private void assertRemoteDetailsIncluded(IOException wrapped)
throws Throwable {
assertInException(wrapped, "desthost");
assertInException(wrapped, DEST_PORT_NAME);
}
private void assertLocalDetailsIncluded(IOException wrapped)
throws Throwable {
assertInException(wrapped, "localhost");
assertInException(wrapped, LOCAL_PORT_NAME);
}
private void assertWikified(Exception e) throws Throwable {
assertInException(e, NetUtils.HADOOP_WIKI);
}
private void assertInException(Exception e, String text) throws Throwable {
String message = extractExceptionMessage(e);
if (!(message.contains(text))) {
throw new AssertionFailedError("Wrong text in message "
+ "\"" + message + "\""
+ " expected \"" + text + "\"")
.initCause(e);
}
}
private String extractExceptionMessage(Exception e) throws Throwable {
assertNotNull("Null Exception", e);
String message = e.getMessage();
if (message == null) {
throw new AssertionFailedError("Empty text in exception " + e)
.initCause(e);
}
return message;
}
private void assertNotInException(Exception e, String text)
throws Throwable{
String message = extractExceptionMessage(e);
if (message.contains(text)) {
throw new AssertionFailedError("Wrong text in message "
+ "\"" + message + "\""
+ " did not expect \"" + text + "\"")
.initCause(e);
}
}
private IOException verifyExceptionClass(IOException e,
Class expectedClass)
throws Throwable {
assertNotNull("Null Exception", e);
IOException wrapped =
NetUtils.wrapException("desthost", DEST_PORT,
"localhost", LOCAL_PORT,
e);
LOG.info(wrapped.toString(), wrapped);
if(!(wrapped.getClass().equals(expectedClass))) {
throw new AssertionFailedError("Wrong exception class; expected "
+ expectedClass
+ " got " + wrapped.getClass() + ": " + wrapped).initCause(wrapped);
}
return wrapped;
}
static NetUtilsTestResolver resolver;
static Configuration config;
@BeforeClass
public static void setupResolver() {
resolver = NetUtilsTestResolver.install();
}
@Before
public void resetResolver() {
resolver.reset();
config = new Configuration();
}
// getByExactName
private void verifyGetByExactNameSearch(String host, String ... searches) {
assertNull(resolver.getByExactName(host));
assertBetterArrayEquals(searches, resolver.getHostSearches());
}
@Test
public void testResolverGetByExactNameUnqualified() {
verifyGetByExactNameSearch("unknown", "unknown.");
}
@Test
public void testResolverGetByExactNameUnqualifiedWithDomain() {
verifyGetByExactNameSearch("unknown.domain", "unknown.domain.");
}
@Test
public void testResolverGetByExactNameQualified() {
verifyGetByExactNameSearch("unknown.", "unknown.");
}
@Test
public void testResolverGetByExactNameQualifiedWithDomain() {
verifyGetByExactNameSearch("unknown.domain.", "unknown.domain.");
}
// getByNameWithSearch
private void verifyGetByNameWithSearch(String host, String ... searches) {
assertNull(resolver.getByNameWithSearch(host));
assertBetterArrayEquals(searches, resolver.getHostSearches());
}
@Test
public void testResolverGetByNameWithSearchUnqualified() {
String host = "unknown";
verifyGetByNameWithSearch(host, host+".a.b.", host+".b.", host+".c.");
}
@Test
public void testResolverGetByNameWithSearchUnqualifiedWithDomain() {
String host = "unknown.domain";
verifyGetByNameWithSearch(host, host+".a.b.", host+".b.", host+".c.");
}
@Test
public void testResolverGetByNameWithSearchQualified() {
String host = "unknown.";
verifyGetByNameWithSearch(host, host);
}
@Test
public void testResolverGetByNameWithSearchQualifiedWithDomain() {
String host = "unknown.domain.";
verifyGetByNameWithSearch(host, host);
}
// getByName
private void verifyGetByName(String host, String ... searches) {
InetAddress addr = null;
try {
addr = resolver.getByName(host);
} catch (UnknownHostException e) {} // ignore
assertNull(addr);
assertBetterArrayEquals(searches, resolver.getHostSearches());
}
@Test
public void testResolverGetByNameQualified() {
String host = "unknown.";
verifyGetByName(host, host);
}
@Test
public void testResolverGetByNameQualifiedWithDomain() {
verifyGetByName("unknown.domain.", "unknown.domain.");
}
@Test
public void testResolverGetByNameUnqualified() {
String host = "unknown";
verifyGetByName(host, host+".a.b.", host+".b.", host+".c.", host+".");
}
@Test
public void testResolverGetByNameUnqualifiedWithDomain() {
String host = "unknown.domain";
verifyGetByName(host, host+".", host+".a.b.", host+".b.", host+".c.");
}
// resolving of hosts
private InetAddress verifyResolve(String host, String ... searches) {
InetAddress addr = null;
try {
addr = resolver.getByName(host);
} catch (UnknownHostException e) {} // ignore
assertNotNull(addr);
assertBetterArrayEquals(searches, resolver.getHostSearches());
return addr;
}
private void
verifyInetAddress(InetAddress addr, String host, String ip) {
assertNotNull(addr);
assertEquals(host, addr.getHostName());
assertEquals(ip, addr.getHostAddress());
}
@Test
public void testResolverUnqualified() {
String host = "host";
InetAddress addr = verifyResolve(host, host+".a.b.");
verifyInetAddress(addr, "host.a.b", "1.1.1.1");
}
@Test
public void testResolverUnqualifiedWithDomain() {
String host = "host.a";
InetAddress addr = verifyResolve(host, host+".", host+".a.b.", host+".b.");
verifyInetAddress(addr, "host.a.b", "1.1.1.1");
}
@Test
  public void testResolverUnqualifiedFull() {
String host = "host.a.b";
InetAddress addr = verifyResolve(host, host+".");
verifyInetAddress(addr, host, "1.1.1.1");
}
@Test
  public void testResolverQualified() {
String host = "host.a.b.";
InetAddress addr = verifyResolve(host, host);
verifyInetAddress(addr, host, "1.1.1.1");
}
// localhost
@Test
public void testResolverLoopback() {
String host = "Localhost";
InetAddress addr = verifyResolve(host); // no lookup should occur
verifyInetAddress(addr, "Localhost", "127.0.0.1");
}
@Test
public void testResolverIP() {
String host = "1.1.1.1";
InetAddress addr = verifyResolve(host); // no lookup should occur for ips
verifyInetAddress(addr, host, host);
}
//
@Test
public void testCanonicalUriWithPort() {
URI uri;
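    // The installed NetUtilsTestResolver qualifies "host" to "host.a.b"; the
    // default port (456 here) is applied only when the URI carries no port.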
uri = NetUtils.getCanonicalUri(URI.create("scheme://host:123"), 456);
assertEquals("scheme://host.a.b:123", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme://host:123/"), 456);
assertEquals("scheme://host.a.b:123/", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme://host:123/path"), 456);
assertEquals("scheme://host.a.b:123/path", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme://host:123/path?q#frag"), 456);
assertEquals("scheme://host.a.b:123/path?q#frag", uri.toString());
}
@Test
public void testCanonicalUriWithDefaultPort() {
URI uri;
uri = NetUtils.getCanonicalUri(URI.create("scheme://host"), 123);
assertEquals("scheme://host.a.b:123", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme://host/"), 123);
assertEquals("scheme://host.a.b:123/", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme://host/path"), 123);
assertEquals("scheme://host.a.b:123/path", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme://host/path?q#frag"), 123);
assertEquals("scheme://host.a.b:123/path?q#frag", uri.toString());
}
@Test
public void testCanonicalUriWithPath() {
URI uri;
uri = NetUtils.getCanonicalUri(URI.create("path"), 2);
assertEquals("path", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("/path"), 2);
assertEquals("/path", uri.toString());
}
@Test
public void testCanonicalUriWithNoAuthority() {
URI uri;
uri = NetUtils.getCanonicalUri(URI.create("scheme:/"), 2);
assertEquals("scheme:/", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme:/path"), 2);
assertEquals("scheme:/path", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme:///"), 2);
assertEquals("scheme:///", uri.toString());
uri = NetUtils.getCanonicalUri(URI.create("scheme:///path"), 2);
assertEquals("scheme:///path", uri.toString());
}
@Test
public void testCanonicalUriWithNoHost() {
URI uri = NetUtils.getCanonicalUri(URI.create("scheme://:123/path"), 2);
assertEquals("scheme://:123/path", uri.toString());
}
@Test
public void testCanonicalUriWithNoPortNoDefaultPort() {
URI uri = NetUtils.getCanonicalUri(URI.create("scheme://host/path"), -1);
assertEquals("scheme://host.a.b/path", uri.toString());
}
/**
* Test for {@link NetUtils#normalizeHostNames}
*/
@Test
public void testNormalizeHostName() {
List<String> hosts = Arrays.asList(new String[] {"127.0.0.1",
"localhost", "1.kanyezone.appspot.com", "UnknownHost123"});
List<String> normalizedHosts = NetUtils.normalizeHostNames(hosts);
    // when an IP address is normalized, the same address is expected in return
assertEquals(normalizedHosts.get(0), hosts.get(0));
    // when normalizing a resolvable hostname, its resolved IP address is expected in return
assertFalse(normalizedHosts.get(1).equals(hosts.get(1)));
assertEquals(normalizedHosts.get(1), hosts.get(0));
    // HADOOP-8372: when normalizing a valid, resolvable hostname that starts
    // with a numeric character, its IP address is expected in return
assertFalse(normalizedHosts.get(2).equals(hosts.get(2)));
    // normalizing an unresolvable hostname should return the hostname unchanged.
assertEquals(normalizedHosts.get(3), hosts.get(3));
}
@Test
public void testGetHostNameOfIP() {
assertNull(NetUtils.getHostNameOfIP(null));
assertNull(NetUtils.getHostNameOfIP(""));
assertNull(NetUtils.getHostNameOfIP("crazytown"));
assertNull(NetUtils.getHostNameOfIP("127.0.0.1:")); // no port
assertNull(NetUtils.getHostNameOfIP("127.0.0.1:-1")); // bogus port
assertNull(NetUtils.getHostNameOfIP("127.0.0.1:A")); // bogus port
assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1"));
assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1:1"));
}
@Test
public void testTrimCreateSocketAddress() {
Configuration conf = new Configuration();
NetUtils.addStaticResolution("host", "127.0.0.1");
final String defaultAddr = "host:1 ";
InetSocketAddress addr = NetUtils.createSocketAddr(defaultAddr);
conf.setSocketAddr("myAddress", addr);
assertEquals(defaultAddr.trim(), NetUtils.getHostPortString(addr));
}
  private <T> void assertBetterArrayEquals(T[] expect, T[] got) {
String expectStr = StringUtils.join(expect, ", ");
String gotStr = StringUtils.join(got, ", ");
assertEquals(expectStr, gotStr);
}
}
| 22,012 | 32.202112 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.HashMap;
import java.util.Map;
import org.junit.Test;
public class TestNetworkTopologyWithNodeGroup {
private final static NetworkTopologyWithNodeGroup cluster = new
NetworkTopologyWithNodeGroup();
private final static NodeBase dataNodes[] = new NodeBase[] {
new NodeBase("h1", "/d1/r1/s1"),
new NodeBase("h2", "/d1/r1/s1"),
new NodeBase("h3", "/d1/r1/s2"),
new NodeBase("h4", "/d1/r2/s3"),
new NodeBase("h5", "/d1/r2/s3"),
new NodeBase("h6", "/d1/r2/s4"),
new NodeBase("h7", "/d2/r3/s5"),
new NodeBase("h8", "/d2/r3/s6")
};
private final static NodeBase computeNode = new NodeBase("/d1/r1/s1/h9");
private final static NodeBase rackOnlyNode = new NodeBase("h10", "/r2");
static {
for(int i=0; i<dataNodes.length; i++) {
cluster.add(dataNodes[i]);
}
}
@Test
public void testNumOfChildren() throws Exception {
assertEquals(dataNodes.length, cluster.getNumOfLeaves());
}
@Test
public void testNumOfRacks() throws Exception {
assertEquals(3, cluster.getNumOfRacks());
}
@Test
public void testRacks() throws Exception {
assertEquals(3, cluster.getNumOfRacks());
assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
assertTrue(cluster.isOnSameRack(dataNodes[1], dataNodes[2]));
assertFalse(cluster.isOnSameRack(dataNodes[2], dataNodes[3]));
assertTrue(cluster.isOnSameRack(dataNodes[3], dataNodes[4]));
assertTrue(cluster.isOnSameRack(dataNodes[4], dataNodes[5]));
assertFalse(cluster.isOnSameRack(dataNodes[5], dataNodes[6]));
assertTrue(cluster.isOnSameRack(dataNodes[6], dataNodes[7]));
}
@Test
public void testNodeGroups() throws Exception {
assertEquals(3, cluster.getNumOfRacks());
assertTrue(cluster.isOnSameNodeGroup(dataNodes[0], dataNodes[1]));
assertFalse(cluster.isOnSameNodeGroup(dataNodes[1], dataNodes[2]));
assertFalse(cluster.isOnSameNodeGroup(dataNodes[2], dataNodes[3]));
assertTrue(cluster.isOnSameNodeGroup(dataNodes[3], dataNodes[4]));
assertFalse(cluster.isOnSameNodeGroup(dataNodes[4], dataNodes[5]));
assertFalse(cluster.isOnSameNodeGroup(dataNodes[5], dataNodes[6]));
assertFalse(cluster.isOnSameNodeGroup(dataNodes[6], dataNodes[7]));
}
@Test
public void testGetDistance() throws Exception {
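    // Distance is the number of tree edges through the closest common
    // ancestor of the /dc/rack/nodegroup hierarchy: same host = 0, same node
    // group = 2, same rack = 4, same data center = 6, different DC = 8.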
assertEquals(0, cluster.getDistance(dataNodes[0], dataNodes[0]));
assertEquals(2, cluster.getDistance(dataNodes[0], dataNodes[1]));
assertEquals(4, cluster.getDistance(dataNodes[0], dataNodes[2]));
assertEquals(6, cluster.getDistance(dataNodes[0], dataNodes[3]));
assertEquals(8, cluster.getDistance(dataNodes[0], dataNodes[6]));
}
@Test
public void testSortByDistance() throws Exception {
NodeBase[] testNodes = new NodeBase[4];
// array contains both local node, local node group & local rack node
testNodes[0] = dataNodes[1];
testNodes[1] = dataNodes[2];
testNodes[2] = dataNodes[3];
testNodes[3] = dataNodes[0];
cluster.sortByDistance(dataNodes[0], testNodes, testNodes.length);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[1]);
assertTrue(testNodes[2] == dataNodes[2]);
assertTrue(testNodes[3] == dataNodes[3]);
// array contains local node & local node group
testNodes[0] = dataNodes[3];
testNodes[1] = dataNodes[4];
testNodes[2] = dataNodes[1];
testNodes[3] = dataNodes[0];
cluster.sortByDistance(dataNodes[0], testNodes, testNodes.length);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[1]);
// array contains local node & rack node
testNodes[0] = dataNodes[5];
testNodes[1] = dataNodes[3];
testNodes[2] = dataNodes[2];
testNodes[3] = dataNodes[0];
cluster.sortByDistance(dataNodes[0], testNodes, testNodes.length);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[2]);
// array contains local-nodegroup node (not a data node also) & rack node
testNodes[0] = dataNodes[6];
testNodes[1] = dataNodes[7];
testNodes[2] = dataNodes[2];
testNodes[3] = dataNodes[0];
cluster.sortByDistance(computeNode, testNodes, testNodes.length);
assertTrue(testNodes[0] == dataNodes[0]);
assertTrue(testNodes[1] == dataNodes[2]);
}
/**
* This picks a large number of nodes at random in order to ensure coverage
*
* @param numNodes the number of nodes
* @param excludedScope the excluded scope
* @return the frequency that nodes were chosen
*/
private Map<Node, Integer> pickNodesAtRandom(int numNodes,
String excludedScope) {
Map<Node, Integer> frequency = new HashMap<Node, Integer>();
for (NodeBase dnd : dataNodes) {
frequency.put(dnd, 0);
}
for (int j = 0; j < numNodes; j++) {
Node random = cluster.chooseRandom(excludedScope);
frequency.put(random, frequency.get(random) + 1);
}
return frequency;
}
/**
* This test checks that chooseRandom works for an excluded node.
*/
@Test
public void testChooseRandomExcludedNode() {
String scope = "~" + NodeBase.getPath(dataNodes[0]);
Map<Node, Integer> frequency = pickNodesAtRandom(100, scope);
for (Node key : dataNodes) {
      // every node except the excluded first one should be chosen at least once
assertTrue(frequency.get(key) > 0 || key == dataNodes[0]);
}
}
/**
   * This test checks that adding a node with an invalid topology fails
   * with an exception indicating the topology is invalid.
*/
@Test
public void testAddNodeWithInvalidTopology() {
// The last node is a node with invalid topology
try {
cluster.add(rackOnlyNode);
fail("Exception should be thrown, so we should not have reached here.");
} catch (Exception e) {
if (!(e instanceof IllegalArgumentException)) {
fail("Expecting IllegalArgumentException, but caught:" + e);
}
assertTrue(e.getMessage().contains("illegal network location"));
}
}
}
| 7,322 | 35.432836 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocketWatcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net.unix;
import static org.junit.Assert.assertFalse;
import java.util.ArrayList;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.After;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import com.google.common.util.concurrent.Uninterruptibles;
public class TestDomainSocketWatcher {
static final Log LOG = LogFactory.getLog(TestDomainSocketWatcher.class);
private Throwable trappedException = null;
@Before
public void before() {
Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}
@After
public void after() {
if (trappedException != null) {
throw new IllegalStateException(
"DomainSocketWatcher thread terminated with unexpected exception.",
trappedException);
}
}
/**
* Test that we can create a DomainSocketWatcher and then shut it down.
*/
@Test(timeout=60000)
public void testCreateShutdown() throws Exception {
DomainSocketWatcher watcher = newDomainSocketWatcher(10000000);
watcher.close();
}
/**
* Test that we can get notifications out a DomainSocketWatcher.
*/
@Test(timeout=180000)
public void testDeliverNotifications() throws Exception {
DomainSocketWatcher watcher = newDomainSocketWatcher(10000000);
DomainSocket pair[] = DomainSocket.socketpair();
final CountDownLatch latch = new CountDownLatch(1);
watcher.add(pair[1], new DomainSocketWatcher.Handler() {
@Override
public boolean handle(DomainSocket sock) {
latch.countDown();
return true;
}
});
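    // Closing the peer end makes pair[1] readable at EOF, which the watcher
    // detects and delivers to the registered handler.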
pair[0].close();
latch.await();
watcher.close();
}
/**
* Test that a java interruption can stop the watcher thread
*/
@Test(timeout=60000)
public void testInterruption() throws Exception {
final DomainSocketWatcher watcher = newDomainSocketWatcher(10);
watcher.watcherThread.interrupt();
Uninterruptibles.joinUninterruptibly(watcher.watcherThread);
watcher.close();
}
/**
* Test that domain sockets are closed when the watcher is closed.
*/
@Test(timeout=300000)
public void testCloseSocketOnWatcherClose() throws Exception {
final DomainSocketWatcher watcher = newDomainSocketWatcher(10000000);
DomainSocket pair[] = DomainSocket.socketpair();
watcher.add(pair[1], new DomainSocketWatcher.Handler() {
@Override
public boolean handle(DomainSocket sock) {
return true;
}
});
watcher.close();
Uninterruptibles.joinUninterruptibly(watcher.watcherThread);
assertFalse(pair[1].isOpen());
}
@Test(timeout=300000)
public void testStress() throws Exception {
final int SOCKET_NUM = 250;
final ReentrantLock lock = new ReentrantLock();
final DomainSocketWatcher watcher = newDomainSocketWatcher(10000000);
final ArrayList<DomainSocket[]> pairs = new ArrayList<DomainSocket[]>();
final AtomicInteger handled = new AtomicInteger(0);
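    // One thread keeps registering new socket pairs with the watcher while
    // another concurrently closes peers or removes sockets; every pair must
    // eventually be handled once (via close or remove) for the loops to end.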
final Thread adderThread = new Thread(new Runnable() {
@Override
public void run() {
try {
for (int i = 0; i < SOCKET_NUM; i++) {
DomainSocket pair[] = DomainSocket.socketpair();
watcher.add(pair[1], new DomainSocketWatcher.Handler() {
@Override
public boolean handle(DomainSocket sock) {
handled.incrementAndGet();
return true;
}
});
lock.lock();
try {
pairs.add(pair);
} finally {
lock.unlock();
}
}
} catch (Throwable e) {
LOG.error(e);
throw new RuntimeException(e);
}
}
});
final Thread removerThread = new Thread(new Runnable() {
@Override
public void run() {
final Random random = new Random();
try {
while (handled.get() != SOCKET_NUM) {
lock.lock();
try {
if (!pairs.isEmpty()) {
int idx = random.nextInt(pairs.size());
DomainSocket pair[] = pairs.remove(idx);
if (random.nextBoolean()) {
pair[0].close();
} else {
watcher.remove(pair[1]);
}
}
} finally {
lock.unlock();
}
}
} catch (Throwable e) {
LOG.error(e);
throw new RuntimeException(e);
}
}
});
adderThread.start();
removerThread.start();
Uninterruptibles.joinUninterruptibly(adderThread);
Uninterruptibles.joinUninterruptibly(removerThread);
watcher.close();
}
@Test(timeout = 300000)
public void testStressInterruption() throws Exception {
final int SOCKET_NUM = 250;
final ReentrantLock lock = new ReentrantLock();
final DomainSocketWatcher watcher = newDomainSocketWatcher(10);
final ArrayList<DomainSocket[]> pairs = new ArrayList<DomainSocket[]>();
final AtomicInteger handled = new AtomicInteger(0);
final Thread adderThread = new Thread(new Runnable() {
@Override
public void run() {
try {
for (int i = 0; i < SOCKET_NUM; i++) {
DomainSocket pair[] = DomainSocket.socketpair();
watcher.add(pair[1], new DomainSocketWatcher.Handler() {
@Override
public boolean handle(DomainSocket sock) {
handled.incrementAndGet();
return true;
}
});
lock.lock();
try {
pairs.add(pair);
} finally {
lock.unlock();
}
TimeUnit.MILLISECONDS.sleep(1);
}
} catch (Throwable e) {
LOG.error(e);
throw new RuntimeException(e);
}
}
});
final Thread removerThread = new Thread(new Runnable() {
@Override
public void run() {
final Random random = new Random();
try {
while (handled.get() != SOCKET_NUM) {
lock.lock();
try {
if (!pairs.isEmpty()) {
int idx = random.nextInt(pairs.size());
DomainSocket pair[] = pairs.remove(idx);
if (random.nextBoolean()) {
pair[0].close();
} else {
watcher.remove(pair[1]);
}
TimeUnit.MILLISECONDS.sleep(1);
}
} finally {
lock.unlock();
}
}
} catch (Throwable e) {
LOG.error(e);
throw new RuntimeException(e);
}
}
});
adderThread.start();
removerThread.start();
TimeUnit.MILLISECONDS.sleep(100);
watcher.watcherThread.interrupt();
Uninterruptibles.joinUninterruptibly(adderThread);
Uninterruptibles.joinUninterruptibly(removerThread);
Uninterruptibles.joinUninterruptibly(watcher.watcherThread);
}
/**
* Creates a new DomainSocketWatcher and tracks its thread for termination due
* to an unexpected exception. At the end of each test, if there was an
* unexpected exception, then that exception is thrown to force a failure of
* the test.
*
* @param interruptCheckPeriodMs interrupt check period passed to
* DomainSocketWatcher
* @return new DomainSocketWatcher
* @throws Exception if there is any failure
*/
private DomainSocketWatcher newDomainSocketWatcher(int interruptCheckPeriodMs)
throws Exception {
DomainSocketWatcher watcher = new DomainSocketWatcher(
interruptCheckPeriodMs, getClass().getSimpleName());
watcher.watcherThread.setUncaughtExceptionHandler(
new Thread.UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread thread, Throwable t) {
trappedException = t;
}
});
return watcher;
}
}
| 9,020 | 30.764085 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TemporarySocketDirectory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net.unix;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.util.Random;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileUtil;
/**
* Create a temporary directory in which sockets can be created.
* When creating a UNIX domain socket, the name
* must be fairly short (around 110 bytes on most platforms).
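 * <p>
 * Illustrative usage sketch (not part of the original API docs): bind
 * sockets under {@link #getDir()} and call {@link #close()} when done.
 * <pre>
 *   TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
 *   File sock = new File(sockDir.getDir(), "example.sock");
 *   // ... bind and use a domain socket at sock.getAbsolutePath() ...
 *   sockDir.close(); // deletes the directory and any leftover socket files
 * </pre>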
*/
public class TemporarySocketDirectory implements Closeable {
private File dir;
public TemporarySocketDirectory() {
String tmp = System.getProperty("java.io.tmpdir", "/tmp");
dir = new File(tmp, "socks." + (System.currentTimeMillis() +
"." + (new Random().nextInt())));
dir.mkdirs();
FileUtil.setWritable(dir, true);
}
public File getDir() {
return dir;
}
@Override
public void close() throws IOException {
if (dir != null) {
FileUtils.deleteDirectory(dir);
dir = null;
}
}
protected void finalize() throws IOException {
close();
}
}
| 1,816 | 29.283333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/unix/TestDomainSocket.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net.unix;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.nio.channels.AsynchronousCloseException;
import java.nio.channels.ClosedChannelException;
import java.util.Arrays;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.unix.DomainSocket.DomainChannel;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import com.google.common.io.Files;
public class TestDomainSocket {
private static TemporarySocketDirectory sockDir;
@BeforeClass
public static void init() {
sockDir = new TemporarySocketDirectory();
DomainSocket.disableBindPathValidation();
}
@AfterClass
public static void shutdown() throws IOException {
sockDir.close();
}
@Before
public void before() {
Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}
/**
* Test that we can create a socket and close it, even if it hasn't been
* opened.
*
* @throws IOException
*/
@Test(timeout=180000)
public void testSocketCreateAndClose() throws IOException {
DomainSocket serv = DomainSocket.bindAndListen(
new File(sockDir.getDir(), "test_sock_create_and_close").
getAbsolutePath());
serv.close();
}
/**
* Test DomainSocket path setting and getting.
*
* @throws IOException
*/
@Test(timeout=180000)
public void testSocketPathSetGet() throws IOException {
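    // getEffectivePath substitutes the supplied port for the "_PORT" token
    // in the configured socket path.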
Assert.assertEquals("/var/run/hdfs/sock.100",
DomainSocket.getEffectivePath("/var/run/hdfs/sock._PORT", 100));
}
/**
* Test that we get a read result of -1 on EOF.
*
* @throws IOException
*/
@Test(timeout=180000)
public void testSocketReadEof() throws Exception {
final String TEST_PATH = new File(sockDir.getDir(),
"testSocketReadEof").getAbsolutePath();
final DomainSocket serv = DomainSocket.bindAndListen(TEST_PATH);
ExecutorService exeServ = Executors.newSingleThreadExecutor();
Callable<Void> callable = new Callable<Void>() {
public Void call(){
DomainSocket conn;
try {
conn = serv.accept();
} catch (IOException e) {
throw new RuntimeException("unexpected IOException", e);
}
byte buf[] = new byte[100];
for (int i = 0; i < buf.length; i++) {
buf[i] = 0;
}
try {
Assert.assertEquals(-1, conn.getInputStream().read());
} catch (IOException e) {
throw new RuntimeException("unexpected IOException", e);
}
return null;
}
};
Future<Void> future = exeServ.submit(callable);
DomainSocket conn = DomainSocket.connect(serv.getPath());
Thread.sleep(50);
conn.close();
serv.close();
future.get(2, TimeUnit.MINUTES);
}
/**
* Test that if one thread is blocking in a read or write operation, another
* thread can close the socket and stop the accept.
*
* @throws IOException
*/
@Test(timeout=180000)
public void testSocketAcceptAndClose() throws Exception {
final String TEST_PATH =
new File(sockDir.getDir(), "test_sock_accept_and_close").getAbsolutePath();
final DomainSocket serv = DomainSocket.bindAndListen(TEST_PATH);
ExecutorService exeServ = Executors.newSingleThreadExecutor();
Callable<Void> callable = new Callable<Void>() {
public Void call(){
try {
serv.accept();
throw new RuntimeException("expected the accept() to be " +
"interrupted and fail");
} catch (AsynchronousCloseException e) {
return null;
} catch (IOException e) {
throw new RuntimeException("unexpected IOException", e);
}
}
};
Future<Void> future = exeServ.submit(callable);
Thread.sleep(500);
serv.close();
future.get(2, TimeUnit.MINUTES);
}
/**
* Test that we get an AsynchronousCloseException when the DomainSocket
* we're using is closed during a read or write operation.
*
* @throws IOException
*/
private void testAsyncCloseDuringIO(final boolean closeDuringWrite)
throws Exception {
final String TEST_PATH = new File(sockDir.getDir(),
"testAsyncCloseDuringIO(" + closeDuringWrite + ")").getAbsolutePath();
final DomainSocket serv = DomainSocket.bindAndListen(TEST_PATH);
ExecutorService exeServ = Executors.newFixedThreadPool(2);
Callable<Void> serverCallable = new Callable<Void>() {
public Void call() {
DomainSocket serverConn = null;
try {
serverConn = serv.accept();
byte buf[] = new byte[100];
for (int i = 0; i < buf.length; i++) {
buf[i] = 0;
}
// The server just continues either writing or reading until someone
// asynchronously closes the client's socket. At that point, all our
// reads return EOF, and writes get a socket error.
          if (closeDuringWrite) {
            try {
              while (true) {
                serverConn.getOutputStream().write(buf);
              }
            } catch (IOException e) {
              // expected: writes fail once the client socket is closed
            }
          } else {
            // reads return -1 (EOF) once the client socket is closed
            while (serverConn.getInputStream().read(buf, 0, buf.length) != -1) {
            }
          }
} catch (IOException e) {
throw new RuntimeException("unexpected IOException", e);
} finally {
IOUtils.cleanup(DomainSocket.LOG, serverConn);
}
return null;
}
};
Future<Void> serverFuture = exeServ.submit(serverCallable);
final DomainSocket clientConn = DomainSocket.connect(serv.getPath());
Callable<Void> clientCallable = new Callable<Void>() {
public Void call(){
// The client writes or reads until another thread
// asynchronously closes the socket. At that point, we should
// get ClosedChannelException, or possibly its subclass
// AsynchronousCloseException.
byte buf[] = new byte[100];
for (int i = 0; i < buf.length; i++) {
buf[i] = 0;
}
try {
if (closeDuringWrite) {
while (true) {
clientConn.getOutputStream().write(buf);
}
} else {
while (true) {
clientConn.getInputStream().read(buf, 0, buf.length);
}
}
} catch (ClosedChannelException e) {
return null;
} catch (IOException e) {
throw new RuntimeException("unexpected IOException", e);
}
}
};
Future<Void> clientFuture = exeServ.submit(clientCallable);
Thread.sleep(500);
clientConn.close();
serv.close();
clientFuture.get(2, TimeUnit.MINUTES);
serverFuture.get(2, TimeUnit.MINUTES);
}
@Test(timeout=180000)
public void testAsyncCloseDuringWrite() throws Exception {
testAsyncCloseDuringIO(true);
}
@Test(timeout=180000)
public void testAsyncCloseDuringRead() throws Exception {
testAsyncCloseDuringIO(false);
}
/**
* Test that attempting to connect to an invalid path doesn't work.
*
* @throws IOException
*/
@Test(timeout=180000)
public void testInvalidOperations() throws IOException {
try {
DomainSocket.connect(
new File(sockDir.getDir(), "test_sock_invalid_operation").
getAbsolutePath());
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("connect(2) error: ", e);
}
}
/**
* Test setting some server options.
*
* @throws IOException
*/
@Test(timeout=180000)
public void testServerOptions() throws Exception {
final String TEST_PATH = new File(sockDir.getDir(),
"test_sock_server_options").getAbsolutePath();
DomainSocket serv = DomainSocket.bindAndListen(TEST_PATH);
try {
// Let's set a new receive buffer size
int bufSize = serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
int newBufSize = bufSize / 2;
serv.setAttribute(DomainSocket.RECEIVE_BUFFER_SIZE, newBufSize);
int nextBufSize = serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
Assert.assertEquals(newBufSize, nextBufSize);
// Let's set a server timeout
int newTimeout = 1000;
serv.setAttribute(DomainSocket.RECEIVE_TIMEOUT, newTimeout);
int nextTimeout = serv.getAttribute(DomainSocket.RECEIVE_TIMEOUT);
Assert.assertEquals(newTimeout, nextTimeout);
try {
serv.accept();
Assert.fail("expected the accept() to time out and fail");
} catch (SocketTimeoutException e) {
GenericTestUtils.assertExceptionContains("accept(2) error: ", e);
}
} finally {
serv.close();
Assert.assertFalse(serv.isOpen());
}
}
/**
* A Throwable representing success.
*
* We can't use null to represent this, because you cannot insert null into
* ArrayBlockingQueue.
*/
static class Success extends Throwable {
private static final long serialVersionUID = 1L;
}
static interface WriteStrategy {
/**
* Initialize a WriteStrategy object from a Socket.
*/
public void init(DomainSocket s) throws IOException;
/**
* Write some bytes.
*/
public void write(byte b[]) throws IOException;
}
static class OutputStreamWriteStrategy implements WriteStrategy {
private OutputStream outs = null;
public void init(DomainSocket s) throws IOException {
outs = s.getOutputStream();
}
public void write(byte b[]) throws IOException {
outs.write(b);
}
}
abstract static class ReadStrategy {
/**
* Initialize a ReadStrategy object from a DomainSocket.
*/
public abstract void init(DomainSocket s) throws IOException;
/**
* Read some bytes.
*/
public abstract int read(byte b[], int off, int length) throws IOException;
public void readFully(byte buf[], int off, int len) throws IOException {
int toRead = len;
while (toRead > 0) {
int ret = read(buf, off, toRead);
if (ret < 0) {
throw new IOException( "Premature EOF from inputStream");
}
toRead -= ret;
off += ret;
}
}
}
static class InputStreamReadStrategy extends ReadStrategy {
private InputStream ins = null;
@Override
public void init(DomainSocket s) throws IOException {
ins = s.getInputStream();
}
@Override
public int read(byte b[], int off, int length) throws IOException {
return ins.read(b, off, length);
}
}
static class DirectByteBufferReadStrategy extends ReadStrategy {
private DomainChannel ch = null;
@Override
public void init(DomainSocket s) throws IOException {
ch = s.getChannel();
}
@Override
public int read(byte b[], int off, int length) throws IOException {
ByteBuffer buf = ByteBuffer.allocateDirect(b.length);
int nread = ch.read(buf);
if (nread < 0) return nread;
buf.flip();
buf.get(b, off, nread);
return nread;
}
}
static class ArrayBackedByteBufferReadStrategy extends ReadStrategy {
private DomainChannel ch = null;
@Override
public void init(DomainSocket s) throws IOException {
ch = s.getChannel();
}
@Override
public int read(byte b[], int off, int length) throws IOException {
ByteBuffer buf = ByteBuffer.wrap(b);
int nread = ch.read(buf);
if (nread < 0) return nread;
buf.flip();
buf.get(b, off, nread);
return nread;
}
}
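  /**
   * Illustrative sketch, not part of the original suite: the minimal
   * pairing of a WriteStrategy with a ReadStrategy over an in-process
   * socketpair. The helper name roundTripSketch is hypothetical; it only
   * uses APIs already exercised elsewhere in this file.
   */
  private static void roundTripSketch(WriteStrategy writer,
      ReadStrategy reader) throws IOException {
    final DomainSocket[] pair = DomainSocket.socketpair();
    try {
      writer.init(pair[0]);
      reader.init(pair[1]);
      byte out[] = new byte[] { 0x1, 0x2, 0x3 };
      writer.write(out);
      byte in[] = new byte[out.length];
      reader.readFully(in, 0, in.length);
      Assert.assertTrue(Arrays.equals(out, in));
    } finally {
      IOUtils.cleanup(DomainSocket.LOG, pair);
    }
  }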
/**
* Test a simple client/server interaction.
*
* @throws IOException
*/
void testClientServer1(final Class<? extends WriteStrategy> writeStrategyClass,
final Class<? extends ReadStrategy> readStrategyClass,
final DomainSocket preConnectedSockets[]) throws Exception {
final String TEST_PATH = new File(sockDir.getDir(),
"test_sock_client_server1").getAbsolutePath();
final byte clientMsg1[] = new byte[] { 0x1, 0x2, 0x3, 0x4, 0x5, 0x6 };
final byte serverMsg1[] = new byte[] { 0x9, 0x8, 0x7, 0x6, 0x5 };
final byte clientMsg2 = 0x45;
final ArrayBlockingQueue<Throwable> threadResults =
new ArrayBlockingQueue<Throwable>(2);
final DomainSocket serv = (preConnectedSockets != null) ?
null : DomainSocket.bindAndListen(TEST_PATH);
Thread serverThread = new Thread() {
public void run(){
// Run server
DomainSocket conn = null;
try {
conn = preConnectedSockets != null ?
preConnectedSockets[0] : serv.accept();
byte in1[] = new byte[clientMsg1.length];
ReadStrategy reader = readStrategyClass.newInstance();
reader.init(conn);
reader.readFully(in1, 0, in1.length);
Assert.assertTrue(Arrays.equals(clientMsg1, in1));
WriteStrategy writer = writeStrategyClass.newInstance();
writer.init(conn);
writer.write(serverMsg1);
InputStream connInputStream = conn.getInputStream();
int in2 = connInputStream.read();
Assert.assertEquals((int)clientMsg2, in2);
conn.close();
} catch (Throwable e) {
threadResults.add(e);
Assert.fail(e.getMessage());
}
threadResults.add(new Success());
}
};
serverThread.start();
Thread clientThread = new Thread() {
public void run(){
try {
DomainSocket client = preConnectedSockets != null ?
preConnectedSockets[1] : DomainSocket.connect(TEST_PATH);
WriteStrategy writer = writeStrategyClass.newInstance();
writer.init(client);
writer.write(clientMsg1);
ReadStrategy reader = readStrategyClass.newInstance();
reader.init(client);
byte in1[] = new byte[serverMsg1.length];
reader.readFully(in1, 0, in1.length);
Assert.assertTrue(Arrays.equals(serverMsg1, in1));
OutputStream clientOutputStream = client.getOutputStream();
clientOutputStream.write(clientMsg2);
client.close();
} catch (Throwable e) {
threadResults.add(e);
}
threadResults.add(new Success());
}
};
clientThread.start();
for (int i = 0; i < 2; i++) {
Throwable t = threadResults.take();
if (!(t instanceof Success)) {
Assert.fail(t.getMessage() + ExceptionUtils.getStackTrace(t));
}
}
serverThread.join(120000);
clientThread.join(120000);
if (serv != null) {
serv.close();
}
}
@Test(timeout=180000)
public void testClientServerOutStreamInStream() throws Exception {
testClientServer1(OutputStreamWriteStrategy.class,
InputStreamReadStrategy.class, null);
}
@Test(timeout=180000)
public void testClientServerOutStreamInStreamWithSocketpair() throws Exception {
testClientServer1(OutputStreamWriteStrategy.class,
InputStreamReadStrategy.class, DomainSocket.socketpair());
}
@Test(timeout=180000)
public void testClientServerOutStreamInDbb() throws Exception {
testClientServer1(OutputStreamWriteStrategy.class,
DirectByteBufferReadStrategy.class, null);
}
@Test(timeout=180000)
public void testClientServerOutStreamInDbbWithSocketpair() throws Exception {
testClientServer1(OutputStreamWriteStrategy.class,
DirectByteBufferReadStrategy.class, DomainSocket.socketpair());
}
@Test(timeout=180000)
public void testClientServerOutStreamInAbb() throws Exception {
testClientServer1(OutputStreamWriteStrategy.class,
ArrayBackedByteBufferReadStrategy.class, null);
}
@Test(timeout=180000)
public void testClientServerOutStreamInAbbWithSocketpair() throws Exception {
testClientServer1(OutputStreamWriteStrategy.class,
ArrayBackedByteBufferReadStrategy.class, DomainSocket.socketpair());
}
static private class PassedFile {
private final int idx;
private final byte[] contents;
private FileInputStream fis;
public PassedFile(int idx) throws IOException {
this.idx = idx;
this.contents = new byte[] { (byte)(idx % 127) };
Files.write(contents, new File(getPath()));
this.fis = new FileInputStream(getPath());
}
public String getPath() {
return new File(sockDir.getDir(), "passed_file" + idx).getAbsolutePath();
}
public FileInputStream getInputStream() throws IOException {
return fis;
}
public void cleanup() throws IOException {
new File(getPath()).delete();
fis.close();
}
public void checkInputStream(FileInputStream fis) throws IOException {
byte buf[] = new byte[contents.length];
IOUtils.readFully(fis, buf, 0, buf.length);
      Assert.assertTrue(Arrays.equals(contents, buf));
}
protected void finalize() {
try {
cleanup();
} catch(Throwable t) {
// ignore
}
}
}
/**
* Test file descriptor passing.
*
* @throws IOException
*/
@Test(timeout=180000)
public void testFdPassing() throws Exception {
final String TEST_PATH =
new File(sockDir.getDir(), "test_sock").getAbsolutePath();
final byte clientMsg1[] = new byte[] { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
final byte serverMsg1[] = new byte[] { 0x31, 0x30, 0x32, 0x34, 0x31, 0x33,
0x44, 0x1, 0x1, 0x1, 0x1, 0x1 };
final ArrayBlockingQueue<Throwable> threadResults =
new ArrayBlockingQueue<Throwable>(2);
final DomainSocket serv = DomainSocket.bindAndListen(TEST_PATH);
final PassedFile passedFiles[] =
new PassedFile[] { new PassedFile(1), new PassedFile(2) };
final FileDescriptor passedFds[] = new FileDescriptor[passedFiles.length];
for (int i = 0; i < passedFiles.length; i++) {
passedFds[i] = passedFiles[i].getInputStream().getFD();
}
Thread serverThread = new Thread() {
public void run(){
// Run server
DomainSocket conn = null;
try {
conn = serv.accept();
byte in1[] = new byte[clientMsg1.length];
InputStream connInputStream = conn.getInputStream();
IOUtils.readFully(connInputStream, in1, 0, in1.length);
Assert.assertTrue(Arrays.equals(clientMsg1, in1));
DomainSocket domainConn = (DomainSocket)conn;
domainConn.sendFileDescriptors(passedFds, serverMsg1, 0,
serverMsg1.length);
conn.close();
} catch (Throwable e) {
threadResults.add(e);
Assert.fail(e.getMessage());
}
threadResults.add(new Success());
}
};
serverThread.start();
Thread clientThread = new Thread() {
public void run(){
try {
DomainSocket client = DomainSocket.connect(TEST_PATH);
OutputStream clientOutputStream = client.getOutputStream();
InputStream clientInputStream = client.getInputStream();
clientOutputStream.write(clientMsg1);
DomainSocket domainConn = (DomainSocket)client;
byte in1[] = new byte[serverMsg1.length];
FileInputStream recvFis[] = new FileInputStream[passedFds.length];
int r = domainConn.
recvFileInputStreams(recvFis, in1, 0, in1.length - 1);
Assert.assertTrue(r > 0);
IOUtils.readFully(clientInputStream, in1, r, in1.length - r);
Assert.assertTrue(Arrays.equals(serverMsg1, in1));
for (int i = 0; i < passedFds.length; i++) {
Assert.assertNotNull(recvFis[i]);
passedFiles[i].checkInputStream(recvFis[i]);
}
for (FileInputStream fis : recvFis) {
fis.close();
}
client.close();
} catch (Throwable e) {
threadResults.add(e);
}
threadResults.add(new Success());
}
};
clientThread.start();
for (int i = 0; i < 2; i++) {
Throwable t = threadResults.take();
if (!(t instanceof Success)) {
Assert.fail(t.getMessage() + ExceptionUtils.getStackTrace(t));
}
}
serverThread.join(120000);
clientThread.join(120000);
serv.close();
for (PassedFile pf : passedFiles) {
pf.cleanup();
}
}
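  /**
   * Illustrative sketch, not part of the original suite: the smallest
   * possible descriptor-passing round trip, using a socketpair instead of
   * a bound server. The helper name fdPassingSketch is hypothetical.
   */
  private static void fdPassingSketch(FileDescriptor fd) throws Exception {
    final DomainSocket[] pair = DomainSocket.socketpair();
    try {
      byte payload[] = new byte[] { 0x1 };
      pair[0].sendFileDescriptors(new FileDescriptor[] { fd },
          payload, 0, payload.length);
      FileInputStream recvFis[] = new FileInputStream[1];
      byte buf[] = new byte[payload.length];
      int r = pair[1].recvFileInputStreams(recvFis, buf, 0, buf.length);
      Assert.assertTrue(r > 0);
      Assert.assertNotNull(recvFis[0]);
      recvFis[0].close();
    } finally {
      IOUtils.cleanup(DomainSocket.LOG, pair);
    }
  }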
/**
* Run validateSocketPathSecurity
*
* @param str The path to validate
* @param prefix A prefix to skip validation for
* @throws IOException
*/
private static void testValidateSocketPath(String str, String prefix)
throws IOException {
int skipComponents = 1;
File prefixFile = new File(prefix);
while (true) {
prefixFile = prefixFile.getParentFile();
if (prefixFile == null) {
break;
}
skipComponents++;
}
DomainSocket.validateSocketPathSecurity0(str,
skipComponents);
}
/**
* Test file descriptor path security.
*
* @throws IOException
*/
@Test(timeout=180000)
public void testFdPassingPathSecurity() throws Exception {
TemporarySocketDirectory tmp = new TemporarySocketDirectory();
try {
String prefix = tmp.getDir().getAbsolutePath();
Shell.execCommand(new String [] {
"mkdir", "-p", prefix + "/foo/bar/baz" });
Shell.execCommand(new String [] {
"chmod", "0700", prefix + "/foo/bar/baz" });
Shell.execCommand(new String [] {
"chmod", "0700", prefix + "/foo/bar" });
Shell.execCommand(new String [] {
"chmod", "0707", prefix + "/foo" });
Shell.execCommand(new String [] {
"mkdir", "-p", prefix + "/q1/q2" });
Shell.execCommand(new String [] {
"chmod", "0700", prefix + "/q1" });
Shell.execCommand(new String [] {
"chmod", "0700", prefix + "/q1/q2" });
testValidateSocketPath(prefix + "/q1/q2", prefix);
try {
testValidateSocketPath(prefix + "/foo/bar/baz", prefix);
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("/foo' is world-writable. " +
"Its permissions are 0707. Please fix this or select a " +
"different socket path.", e);
}
try {
testValidateSocketPath(prefix + "/nope", prefix);
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("failed to stat a path " +
"component: ", e);
}
// Root should be secure
DomainSocket.validateSocketPathSecurity0("/foo", 1);
} finally {
tmp.close();
}
}
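  /**
   * Illustrative sketch, not part of the original suite: how a caller
   * might validate a socket path before binding to it, skipping no
   * path components. The helper name validateThenBindSketch is
   * hypothetical.
   */
  private static void validateThenBindSketch(String path) throws IOException {
    DomainSocket.validateSocketPathSecurity0(path, 0);
    DomainSocket sock = DomainSocket.bindAndListen(path);
    sock.close();
  }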
@Test(timeout=180000)
public void testShutdown() throws Exception {
final AtomicInteger bytesRead = new AtomicInteger(0);
final AtomicBoolean failed = new AtomicBoolean(false);
final DomainSocket[] socks = DomainSocket.socketpair();
Runnable reader = new Runnable() {
@Override
public void run() {
while (true) {
try {
int ret = socks[1].getInputStream().read();
if (ret == -1) return;
bytesRead.addAndGet(1);
} catch (IOException e) {
DomainSocket.LOG.error("reader error", e);
failed.set(true);
return;
}
}
}
};
Thread readerThread = new Thread(reader);
readerThread.start();
socks[0].getOutputStream().write(1);
socks[0].getOutputStream().write(2);
socks[0].getOutputStream().write(3);
Assert.assertTrue(readerThread.isAlive());
socks[0].shutdown();
readerThread.join();
Assert.assertFalse(failed.get());
Assert.assertEquals(3, bytesRead.get());
IOUtils.cleanup(null, socks);
}
}
| 25,119 | 31.793734 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.log;
import java.io.*;
import java.net.*;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.net.NetUtils;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
import org.apache.commons.logging.impl.*;
import org.apache.log4j.*;
public class TestLogLevel extends TestCase {
static final PrintStream out = System.out;
public void testDynamicLogLevel() throws Exception {
String logName = TestLogLevel.class.getName();
Log testlog = LogFactory.getLog(logName);
//only test Log4JLogger
if (testlog instanceof Log4JLogger) {
Logger log = ((Log4JLogger)testlog).getLogger();
log.debug("log.debug1");
log.info("log.info1");
log.error("log.error1");
assertTrue(!Level.ERROR.equals(log.getEffectiveLevel()));
HttpServer2 server = new HttpServer2.Builder().setName("..")
.addEndpoint(new URI("http://localhost:0")).setFindPort(true)
.build();
server.start();
String authority = NetUtils.getHostPortString(server
.getConnectorAddress(0));
//servlet
URL url = new URL("http://" + authority + "/logLevel?log=" + logName
+ "&level=" + Level.ERROR);
out.println("*** Connecting to " + url);
URLConnection connection = url.openConnection();
connection.connect();
BufferedReader in = new BufferedReader(new InputStreamReader(
connection.getInputStream()));
      String line;
      while ((line = in.readLine()) != null) {
        out.println(line);
      }
in.close();
log.debug("log.debug2");
log.info("log.info2");
log.error("log.error2");
assertTrue(Level.ERROR.equals(log.getEffectiveLevel()));
//command line
String[] args = {"-setlevel", authority, logName, Level.DEBUG.toString()};
LogLevel.main(args);
log.debug("log.debug3");
log.info("log.info3");
log.error("log.error3");
assertTrue(Level.DEBUG.equals(log.getEffectiveLevel()));
}
else {
out.println(testlog.getClass() + " not tested.");
}
}
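  /**
   * Illustrative sketch, not part of the original test: the servlet
   * contract exercised above reduces to a single GET request against
   * /logLevel. The helper name setLevelViaServlet is hypothetical.
   */
  private static void setLevelViaServlet(String authority, String logName,
      Level level) throws IOException {
    URL url = new URL("http://" + authority + "/logLevel?log=" + logName
        + "&level=" + level);
    URLConnection connection = url.openConnection();
    connection.connect();
    // drain the response so the request is known to have completed
    BufferedReader in = new BufferedReader(new InputStreamReader(
        connection.getInputStream()));
    while (in.readLine() != null) {
    }
    in.close();
  }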
}
| 2,880 | 32.894118 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.log;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Appender;
import org.apache.log4j.Category;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.WriterAppender;
import org.apache.log4j.spi.HierarchyEventListener;
import org.apache.log4j.spi.LoggerFactory;
import org.apache.log4j.spi.LoggerRepository;
import org.apache.log4j.spi.ThrowableInformation;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.MappingJsonFactory;
import org.codehaus.jackson.node.ContainerNode;
import org.junit.Test;
import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;
import java.net.NoRouteToHostException;
import java.util.Enumeration;
import java.util.Vector;
public class TestLog4Json extends TestCase {
private static final Log LOG = LogFactory.getLog(TestLog4Json.class);
private static final JsonFactory factory = new MappingJsonFactory();
@Test
public void testConstruction() throws Throwable {
Log4Json l4j = new Log4Json();
String outcome = l4j.toJson(new StringWriter(),
"name", 0, "DEBUG", "thread1",
"hello, world", null).toString();
println("testConstruction", outcome);
}
@Test
public void testException() throws Throwable {
Exception e =
new NoRouteToHostException("that box caught fire 3 years ago");
ThrowableInformation ti = new ThrowableInformation(e);
Log4Json l4j = new Log4Json();
long timeStamp = Time.now();
String outcome = l4j.toJson(new StringWriter(),
"testException",
timeStamp,
"INFO",
"quoted\"",
"new line\n and {}",
ti)
.toString();
println("testException", outcome);
}
@Test
public void testNestedException() throws Throwable {
Exception e =
new NoRouteToHostException("that box caught fire 3 years ago");
Exception ioe = new IOException("Datacenter problems", e);
ThrowableInformation ti = new ThrowableInformation(ioe);
Log4Json l4j = new Log4Json();
long timeStamp = Time.now();
String outcome = l4j.toJson(new StringWriter(),
"testNestedException",
timeStamp,
"INFO",
"quoted\"",
"new line\n and {}",
ti)
.toString();
println("testNestedException", outcome);
ContainerNode rootNode = Log4Json.parse(outcome);
assertEntryEquals(rootNode, Log4Json.LEVEL, "INFO");
assertEntryEquals(rootNode, Log4Json.NAME, "testNestedException");
assertEntryEquals(rootNode, Log4Json.TIME, timeStamp);
assertEntryEquals(rootNode, Log4Json.EXCEPTION_CLASS,
ioe.getClass().getName());
JsonNode node = assertNodeContains(rootNode, Log4Json.STACK);
assertTrue("Not an array: " + node, node.isArray());
node = assertNodeContains(rootNode, Log4Json.DATE);
assertTrue("Not a string: " + node, node.isTextual());
    //rather than try to make assertions about the text equalling another
    //ISO date, this test asserts that the hyphen and colon characters
    //are in the string.
String dateText = node.getTextValue();
assertTrue("No '-' in " + dateText, dateText.contains("-"));
assertTrue("No '-' in " + dateText, dateText.contains(":"));
}
/**
   * Create a log instance and log to it
* @throws Throwable if it all goes wrong
*/
@Test
public void testLog() throws Throwable {
String message = "test message";
Throwable throwable = null;
String json = logOut(message, throwable);
println("testLog", json);
}
/**
   * Create a log instance and log to it
* @throws Throwable if it all goes wrong
*/
@Test
public void testLogExceptions() throws Throwable {
String message = "test message";
Throwable inner = new IOException("Directory / not found");
Throwable throwable = new IOException("startup failure", inner);
String json = logOut(message, throwable);
println("testLogExceptions", json);
}
void assertEntryEquals(ContainerNode rootNode, String key, String value) {
JsonNode node = assertNodeContains(rootNode, key);
assertEquals(value, node.getTextValue());
}
private JsonNode assertNodeContains(ContainerNode rootNode, String key) {
JsonNode node = rootNode.get(key);
if (node == null) {
fail("No entry of name \"" + key + "\" found in " + rootNode.toString());
}
return node;
}
void assertEntryEquals(ContainerNode rootNode, String key, long value) {
JsonNode node = assertNodeContains(rootNode, key);
assertEquals(value, node.getNumberValue());
}
/**
   * Print out what's going on. The logging APIs aren't used here, and the
   * text is delimited with '#' markers so it is easy to spot in the output.
*
* @param name name of operation
* @param text text to print
*/
private void println(String name, String text) {
System.out.println(name + ": #" + text + "#");
}
private String logOut(String message, Throwable throwable) {
StringWriter writer = new StringWriter();
Logger logger = createLogger(writer);
logger.info(message, throwable);
//remove and close the appender
logger.removeAllAppenders();
return writer.toString();
}
public Logger createLogger(Writer writer) {
TestLoggerRepository repo = new TestLoggerRepository();
Logger logger = repo.getLogger("test");
Log4Json layout = new Log4Json();
WriterAppender appender = new WriterAppender(layout, writer);
logger.addAppender(appender);
return logger;
}
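  /**
   * Illustrative sketch, not part of the original tests: the round trip
   * that the assertions above rely on -- log a message, parse the JSON
   * back, and inspect a field. The method name roundTripSketch is
   * hypothetical.
   */
  private void roundTripSketch() throws Exception {
    String json = logOut("round trip", null);
    ContainerNode root = Log4Json.parse(json);
    assertEntryEquals(root, Log4Json.LEVEL, "INFO");
  }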
/**
   * This test logger avoids integrating with the main runtime's Logger
   * hierarchy, keeping the test isolated from global log4j configuration.
*/
private static class TestLogger extends Logger {
private TestLogger(String name, LoggerRepository repo) {
super(name);
repository = repo;
setLevel(Level.INFO);
}
}
public static class TestLoggerRepository implements LoggerRepository {
@Override
public void addHierarchyEventListener(HierarchyEventListener listener) {
}
@Override
public boolean isDisabled(int level) {
return false;
}
@Override
public void setThreshold(Level level) {
}
@Override
public void setThreshold(String val) {
}
@Override
public void emitNoAppenderWarning(Category cat) {
}
@Override
public Level getThreshold() {
return Level.ALL;
}
@Override
public Logger getLogger(String name) {
return new TestLogger(name, this);
}
@Override
public Logger getLogger(String name, LoggerFactory factory) {
return new TestLogger(name, this);
}
@Override
public Logger getRootLogger() {
return new TestLogger("root", this);
}
@Override
public Logger exists(String name) {
return null;
}
@Override
public void shutdown() {
}
@Override
public Enumeration getCurrentLoggers() {
return new Vector().elements();
}
@Override
public Enumeration getCurrentCategories() {
return new Vector().elements();
}
@Override
public void fireAddAppenderEvent(Category logger, Appender appender) {
}
@Override
public void resetConfiguration() {
}
}
}
| 8,194 | 29.128676 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/TestTraceUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import static org.junit.Assert.assertEquals;
import java.util.LinkedList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
import org.apache.htrace.HTraceConfiguration;
import org.junit.Test;
public class TestTraceUtils {
private static String TEST_PREFIX = "test.prefix.htrace.";
@Test
public void testWrappedHadoopConf() {
String key = "sampler";
String value = "ProbabilitySampler";
Configuration conf = new Configuration();
conf.set(TEST_PREFIX + key, value);
HTraceConfiguration wrapped = TraceUtils.wrapHadoopConf(TEST_PREFIX, conf);
assertEquals(value, wrapped.get(key));
}
@Test
public void testExtraConfig() {
String key = "test.extra.config";
String oldValue = "old value";
String newValue = "new value";
Configuration conf = new Configuration();
conf.set(TEST_PREFIX + key, oldValue);
LinkedList<ConfigurationPair> extraConfig =
new LinkedList<ConfigurationPair>();
extraConfig.add(new ConfigurationPair(TEST_PREFIX + key, newValue));
HTraceConfiguration wrapped = TraceUtils.wrapHadoopConf(TEST_PREFIX, conf, extraConfig);
assertEquals(newValue, wrapped.get(key));
}
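  /**
   * Illustrative sketch, not part of the original tests: keys set outside
   * the prefix are invisible through the wrapper. This assumes a missing
   * key surfaces as null, as Configuration.get does; the method name
   * prefixIsolationSketch is hypothetical.
   */
  private void prefixIsolationSketch() {
    Configuration conf = new Configuration();
    conf.set("unrelated.key", "value");
    HTraceConfiguration wrapped = TraceUtils.wrapHadoopConf(TEST_PREFIX, conf);
    assertEquals(null, wrapped.get("unrelated.key"));
  }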
}
| 2,074 | 37.425926 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.htrace.Span;
import org.apache.htrace.SpanReceiver;
import org.apache.htrace.HTraceConfiguration;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeoutException;
import org.junit.Assert;
/**
* Span receiver that puts all spans into a single set.
* This is useful for testing.
* <p/>
 * We're not using HTrace's POJOReceiver here because it doesn't
 * push all the spans to a static place, which would make testing
 * SpanReceiverHost harder.
*/
public class SetSpanReceiver implements SpanReceiver {
public SetSpanReceiver(HTraceConfiguration conf) {
}
public void receiveSpan(Span span) {
SetHolder.spans.put(span.getSpanId(), span);
}
public void close() {
}
public static void clear() {
SetHolder.spans.clear();
}
public static int size() {
return SetHolder.spans.size();
}
public static Collection<Span> getSpans() {
return SetHolder.spans.values();
}
public static Map<String, List<Span>> getMap() {
return SetHolder.getMap();
}
public static class SetHolder {
public static ConcurrentHashMap<Long, Span> spans =
new ConcurrentHashMap<Long, Span>();
public static Map<String, List<Span>> getMap() {
Map<String, List<Span>> map = new HashMap<String, List<Span>>();
for (Span s : spans.values()) {
List<Span> l = map.get(s.getDescription());
if (l == null) {
l = new LinkedList<Span>();
map.put(s.getDescription(), l);
}
l.add(s);
}
return map;
}
}
public static void assertSpanNamesFound(final String[] expectedSpanNames) {
try {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
Map<String, List<Span>> map = SetSpanReceiver.SetHolder.getMap();
for (String spanName : expectedSpanNames) {
if (!map.containsKey(spanName)) {
return false;
}
}
return true;
}
}, 100, 1000);
} catch (TimeoutException e) {
Assert.fail("timed out to get expected spans: " + e.getMessage());
} catch (InterruptedException e) {
Assert.fail("interrupted while waiting spans: " + e.getMessage());
}
}
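  // Illustrative usage sketch, not part of the original class. Assuming
  // this receiver has been registered with the tracing framework (e.g.
  // via SpanReceiverHost) and the htrace 3.x Trace/Sampler API, a test
  // would typically do:
  //
  //   SetSpanReceiver.clear();
  //   TraceScope scope = Trace.startSpan("myOperation", Sampler.ALWAYS);
  //   try {
  //     // ... traced work ...
  //   } finally {
  //     scope.close();
  //   }
  //   SetSpanReceiver.assertSpanNamesFound(new String[] { "myOperation" });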
}
| 3,333 | 29.309091 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import static org.junit.Assert.*;
import java.net.InetSocketAddress;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.SshFenceByTcpPort.Args;
import org.apache.log4j.Level;
import org.junit.Assume;
import org.junit.Test;
public class TestSshFenceByTcpPort {
static {
((Log4JLogger)SshFenceByTcpPort.LOG).getLogger().setLevel(Level.ALL);
}
private static String TEST_FENCING_HOST = System.getProperty(
"test.TestSshFenceByTcpPort.host", "localhost");
private static final String TEST_FENCING_PORT = System.getProperty(
"test.TestSshFenceByTcpPort.port", "8020");
private static final String TEST_KEYFILE = System.getProperty(
"test.TestSshFenceByTcpPort.key");
private static final InetSocketAddress TEST_ADDR =
new InetSocketAddress(TEST_FENCING_HOST,
Integer.valueOf(TEST_FENCING_PORT));
private static final HAServiceTarget TEST_TARGET =
new DummyHAService(HAServiceState.ACTIVE, TEST_ADDR);
/**
* Connect to Google's DNS server - not running ssh!
*/
private static final HAServiceTarget UNFENCEABLE_TARGET =
new DummyHAService(HAServiceState.ACTIVE,
new InetSocketAddress("8.8.8.8", 1234));
@Test(timeout=20000)
public void testFence() throws BadFencingConfigurationException {
Assume.assumeTrue(isConfigured());
Configuration conf = new Configuration();
conf.set(SshFenceByTcpPort.CONF_IDENTITIES_KEY, TEST_KEYFILE);
SshFenceByTcpPort fence = new SshFenceByTcpPort();
fence.setConf(conf);
assertTrue(fence.tryFence(
TEST_TARGET,
null));
}
/**
* Test connecting to a host which definitely won't respond.
* Make sure that it times out and returns false, but doesn't throw
* any exception
*/
@Test(timeout=20000)
public void testConnectTimeout() throws BadFencingConfigurationException {
Configuration conf = new Configuration();
conf.setInt(SshFenceByTcpPort.CONF_CONNECT_TIMEOUT_KEY, 3000);
SshFenceByTcpPort fence = new SshFenceByTcpPort();
fence.setConf(conf);
assertFalse(fence.tryFence(UNFENCEABLE_TARGET, ""));
}
@Test
public void testArgsParsing() throws BadFencingConfigurationException {
Args args = new SshFenceByTcpPort.Args(null);
assertEquals(System.getProperty("user.name"), args.user);
assertEquals(22, args.sshPort);
args = new SshFenceByTcpPort.Args("");
assertEquals(System.getProperty("user.name"), args.user);
assertEquals(22, args.sshPort);
args = new SshFenceByTcpPort.Args("12345");
assertEquals("12345", args.user);
assertEquals(22, args.sshPort);
args = new SshFenceByTcpPort.Args(":12345");
assertEquals(System.getProperty("user.name"), args.user);
assertEquals(12345, args.sshPort);
args = new SshFenceByTcpPort.Args("foo:2222");
assertEquals("foo", args.user);
assertEquals(2222, args.sshPort);
}
@Test
public void testBadArgsParsing() throws BadFencingConfigurationException {
assertBadArgs(":"); // No port specified
assertBadArgs("bar.com:"); // "
assertBadArgs(":xx"); // Port does not parse
assertBadArgs("bar.com:xx"); // "
}
private void assertBadArgs(String argStr) {
try {
new Args(argStr);
fail("Did not fail on bad args: " + argStr);
} catch (BadFencingConfigurationException e) {
// Expected
}
}
private boolean isConfigured() {
return (TEST_FENCING_HOST != null && !TEST_FENCING_HOST.isEmpty()) &&
(TEST_FENCING_PORT != null && !TEST_FENCING_PORT.isEmpty()) &&
(TEST_KEYFILE != null && !TEST_KEYFILE.isEmpty());
}
}
| 4,597 | 34.369231 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestShellCommandFencer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import static org.junit.Assert.*;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import static org.mockito.Mockito.spy;
public class TestShellCommandFencer {
private ShellCommandFencer fencer = createFencer();
private static final HAServiceTarget TEST_TARGET =
new DummyHAService(HAServiceState.ACTIVE,
new InetSocketAddress("dummyhost", 1234));
@BeforeClass
public static void setupLogSpy() {
ShellCommandFencer.LOG = spy(ShellCommandFencer.LOG);
}
@Before
public void resetLogSpy() {
Mockito.reset(ShellCommandFencer.LOG);
}
private static ShellCommandFencer createFencer() {
Configuration conf = new Configuration();
conf.set("in.fencing.tests", "yessir");
ShellCommandFencer fencer = new ShellCommandFencer();
fencer.setConf(conf);
return fencer;
}
/**
* Test that the exit code of the script determines
* whether the fencer succeeded or failed
*/
@Test
public void testBasicSuccessFailure() {
assertTrue(fencer.tryFence(TEST_TARGET, "echo"));
assertFalse(fencer.tryFence(TEST_TARGET, "exit 1"));
// bad path should also fail
assertFalse(fencer.tryFence(TEST_TARGET, "xxxxxxxxxxxx"));
}
@Test
public void testCheckNoArgs() {
try {
Configuration conf = new Configuration();
new NodeFencer(conf, "shell");
fail("Didn't throw when passing no args to shell");
} catch (BadFencingConfigurationException confe) {
assertTrue(
"Unexpected exception:" + StringUtils.stringifyException(confe),
confe.getMessage().contains("No argument passed"));
}
}
@Test
public void testCheckParensNoArgs() {
try {
Configuration conf = new Configuration();
new NodeFencer(conf, "shell()");
fail("Didn't throw when passing no args to shell");
} catch (BadFencingConfigurationException confe) {
assertTrue(
"Unexpected exception:" + StringUtils.stringifyException(confe),
confe.getMessage().contains("Unable to parse line: 'shell()'"));
}
}
/**
* Test that lines on stdout get passed as INFO
* level messages
*/
@Test
public void testStdoutLogging() {
assertTrue(fencer.tryFence(TEST_TARGET, "echo hello"));
Mockito.verify(ShellCommandFencer.LOG).info(
Mockito.endsWith("echo hello: hello"));
}
/**
* Test that lines on stderr get passed as
* WARN level log messages
*/
@Test
public void testStderrLogging() {
assertTrue(fencer.tryFence(TEST_TARGET, "echo hello>&2"));
Mockito.verify(ShellCommandFencer.LOG).warn(
Mockito.endsWith("echo hello>&2: hello"));
}
/**
* Verify that the Configuration gets passed as
* environment variables to the fencer.
*/
@Test
public void testConfAsEnvironment() {
if (!Shell.WINDOWS) {
fencer.tryFence(TEST_TARGET, "echo $in_fencing_tests");
Mockito.verify(ShellCommandFencer.LOG).info(
Mockito.endsWith("echo $in...ing_tests: yessir"));
} else {
fencer.tryFence(TEST_TARGET, "echo %in_fencing_tests%");
Mockito.verify(ShellCommandFencer.LOG).info(
Mockito.endsWith("echo %in...ng_tests%: yessir"));
}
}
/**
* Verify that information about the fencing target gets passed as
* environment variables to the fencer.
*/
@Test
public void testTargetAsEnvironment() {
if (!Shell.WINDOWS) {
fencer.tryFence(TEST_TARGET, "echo $target_host $target_port");
Mockito.verify(ShellCommandFencer.LOG).info(
Mockito.endsWith("echo $ta...rget_port: dummyhost 1234"));
} else {
fencer.tryFence(TEST_TARGET, "echo %target_host% %target_port%");
Mockito.verify(ShellCommandFencer.LOG).info(
Mockito.endsWith("echo %ta...get_port%: dummyhost 1234"));
}
}
/**
* Test that we properly close off our input to the subprocess
* such that it knows there's no tty connected. This is important
* so that, if we use 'ssh', it won't try to prompt for a password
* and block forever, for example.
*/
@Test(timeout=10000)
public void testSubprocessInputIsClosed() {
assertFalse(fencer.tryFence(TEST_TARGET, "read"));
}
@Test
public void testCommandAbbreviation() {
assertEquals("a...f", ShellCommandFencer.abbreviate("abcdef", 5));
assertEquals("abcdef", ShellCommandFencer.abbreviate("abcdef", 6));
assertEquals("abcdef", ShellCommandFencer.abbreviate("abcdef", 7));
assertEquals("a...g", ShellCommandFencer.abbreviate("abcdefg", 5));
assertEquals("a...h", ShellCommandFencer.abbreviate("abcdefgh", 5));
assertEquals("a...gh", ShellCommandFencer.abbreviate("abcdefgh", 6));
assertEquals("ab...gh", ShellCommandFencer.abbreviate("abcdefgh", 7));
}
}
| 5,871 | 32.175141 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverControllerStress.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.Time;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
/**
* Stress test for ZKFailoverController.
* Starts multiple ZKFCs for dummy services, and then performs many automatic
* failovers. While doing so, ensures that a fake "shared resource"
* (simulating the shared edits dir) is only owned by one service at a time.
*/
public class TestZKFailoverControllerStress extends ClientBaseWithFixes {
private static final int STRESS_RUNTIME_SECS = 30;
private static final int EXTRA_TIMEOUT_SECS = 10;
private Configuration conf;
private MiniZKFCCluster cluster;
@Before
public void setupConfAndServices() throws Exception {
conf = new Configuration();
conf.set(ZKFailoverController.ZK_QUORUM_KEY, hostPort);
this.cluster = new MiniZKFCCluster(conf, getServer(serverFactory));
}
@After
public void stopCluster() throws Exception {
if (cluster != null) {
cluster.stop();
}
}
/**
* Simply fail back and forth between two services for the
* configured amount of time, via expiring their ZK sessions.
*/
@Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
public void testExpireBackAndForth() throws Exception {
cluster.start();
long st = Time.now();
long runFor = STRESS_RUNTIME_SECS * 1000;
int i = 0;
while (Time.now() - st < runFor) {
// flip flop the services back and forth
int from = i % 2;
int to = (i + 1) % 2;
// Expire one service, it should fail over to the other
LOG.info("Failing over via expiration from " + from + " to " + to);
cluster.expireAndVerifyFailover(from, to);
i++;
}
}
/**
* Randomly expire the ZK sessions of the two ZKFCs. This differs
* from the above test in that it is not a controlled failover -
* we just do random expirations and expect neither one to ever
* generate fatal exceptions.
*/
@Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
public void testRandomExpirations() throws Exception {
cluster.start();
long st = Time.now();
long runFor = STRESS_RUNTIME_SECS * 1000;
Random r = new Random();
while (Time.now() - st < runFor) {
cluster.getTestContext().checkException();
int targetIdx = r.nextInt(2);
ActiveStandbyElector target = cluster.getElector(targetIdx);
long sessId = target.getZKSessionIdForTests();
if (sessId != -1) {
LOG.info(String.format("Expiring session %x for svc %d",
sessId, targetIdx));
getServer(serverFactory).closeSession(sessId);
}
Thread.sleep(r.nextInt(300));
}
}
/**
* Have the services fail their health checks half the time,
* causing the master role to bounce back and forth in the
   * cluster. Meanwhile, ZK is made to disconnect clients every
* 50ms, to trigger the retry code and failures to become active.
*/
@Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
public void testRandomHealthAndDisconnects() throws Exception {
long runFor = STRESS_RUNTIME_SECS * 1000;
Mockito.doAnswer(new RandomlyThrow(0))
.when(cluster.getService(0).proxy).monitorHealth();
Mockito.doAnswer(new RandomlyThrow(1))
.when(cluster.getService(1).proxy).monitorHealth();
conf.setInt(CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_KEY, 100);
// Don't start until after the above mocking. Otherwise we can get
// Mockito errors if the HM calls the proxy in the middle of
// setting up the mock.
cluster.start();
long st = Time.now();
while (Time.now() - st < runFor) {
cluster.getTestContext().checkException();
serverFactory.closeAll();
Thread.sleep(50);
}
}
/**
* Randomly throw an exception half the time the method is called
*/
@SuppressWarnings("rawtypes")
private static class RandomlyThrow implements Answer {
private Random r = new Random();
private final int svcIdx;
public RandomlyThrow(int svcIdx) {
this.svcIdx = svcIdx;
}
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
if (r.nextBoolean()) {
LOG.info("Throwing an exception for svc " + svcIdx);
throw new HealthCheckFailedException("random failure");
}
return invocation.callRealMethod();
}
}
}
| 5,472 | 33.20625 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HealthMonitor.State;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.data.Stat;
import org.apache.zookeeper.server.ZooKeeperServer;
import com.google.common.base.Preconditions;
import com.google.common.primitives.Ints;
/**
* Harness for starting two dummy ZK FailoverControllers, associated with
* DummyHAServices. This harness starts two such ZKFCs, designated by
* indexes 0 and 1, and provides utilities for building tests around them.
*/
public class MiniZKFCCluster {
private final TestContext ctx;
private final ZooKeeperServer zks;
private DummyHAService svcs[];
private DummyZKFCThread thrs[];
private Configuration conf;
private DummySharedResource sharedResource = new DummySharedResource();
private static final Log LOG = LogFactory.getLog(MiniZKFCCluster.class);
public MiniZKFCCluster(Configuration conf, ZooKeeperServer zks) {
this.conf = conf;
// Fast check interval so tests run faster
conf.setInt(CommonConfigurationKeys.HA_HM_CHECK_INTERVAL_KEY, 50);
conf.setInt(CommonConfigurationKeys.HA_HM_CONNECT_RETRY_INTERVAL_KEY, 50);
conf.setInt(CommonConfigurationKeys.HA_HM_SLEEP_AFTER_DISCONNECT_KEY, 50);
svcs = new DummyHAService[2];
svcs[0] = new DummyHAService(HAServiceState.INITIALIZING,
new InetSocketAddress("svc1", 1234));
svcs[0].setSharedResource(sharedResource);
svcs[1] = new DummyHAService(HAServiceState.INITIALIZING,
new InetSocketAddress("svc2", 1234));
svcs[1].setSharedResource(sharedResource);
this.ctx = new TestContext();
this.zks = zks;
}
/**
   * Set up two services and their failover controllers. Service 0 is
   * started first, so that it enters ACTIVE state, and then service 1 is
   * started, which enters STANDBY.
*/
public void start() throws Exception {
    // Format the base znode in ZK; should succeed
thrs = new DummyZKFCThread[2];
thrs[0] = new DummyZKFCThread(ctx, svcs[0]);
assertEquals(0, thrs[0].zkfc.run(new String[]{"-formatZK"}));
ctx.addThread(thrs[0]);
thrs[0].start();
LOG.info("Waiting for svc0 to enter active state");
waitForHAState(0, HAServiceState.ACTIVE);
LOG.info("Adding svc1");
thrs[1] = new DummyZKFCThread(ctx, svcs[1]);
thrs[1].start();
waitForHAState(1, HAServiceState.STANDBY);
}
/**
* Stop the services.
* @throws Exception if either of the services had encountered a fatal error
*/
public void stop() throws Exception {
for (DummyZKFCThread thr : thrs) {
if (thr != null) {
thr.interrupt();
}
}
if (ctx != null) {
ctx.stop();
}
sharedResource.assertNoViolations();
}
/**
* @return the TestContext implementation used internally. This allows more
* threads to be added to the context, etc.
*/
public TestContext getTestContext() {
return ctx;
}
public DummyHAService getService(int i) {
return svcs[i];
}
public ActiveStandbyElector getElector(int i) {
return thrs[i].zkfc.getElectorForTests();
}
public DummyZKFC getZkfc(int i) {
return thrs[i].zkfc;
}
public void setHealthy(int idx, boolean healthy) {
svcs[idx].isHealthy = healthy;
}
public void setFailToBecomeActive(int idx, boolean doFail) {
svcs[idx].failToBecomeActive = doFail;
}
public void setFailToBecomeStandby(int idx, boolean doFail) {
svcs[idx].failToBecomeStandby = doFail;
}
public void setFailToFence(int idx, boolean doFail) {
svcs[idx].failToFence = doFail;
}
public void setUnreachable(int idx, boolean unreachable) {
svcs[idx].actUnreachable = unreachable;
}
/**
* Wait for the given HA service to enter the given HA state.
* This is based on the state of ZKFC, not the state of HA service.
   * There can be a difference between the two: for example, when the
   * service becomes unhealthy, ZKFC will quit the ZK election and
   * transition to HAServiceState.INITIALIZING, remaining in that state
   * until the service becomes healthy again.
*/
public void waitForHAState(int idx, HAServiceState state)
throws Exception {
DummyZKFC svc = getZkfc(idx);
while (svc.getServiceState() != state) {
ctx.checkException();
Thread.sleep(50);
}
}
/**
* Wait for the ZKFC to be notified of a change in health state.
*/
public void waitForHealthState(int idx, State state)
throws Exception {
ZKFCTestUtil.waitForHealthState(thrs[idx].zkfc, state, ctx);
}
/**
* Wait for the given elector to enter the given elector state.
* @param idx the service index (0 or 1)
* @param state the state to wait for
* @throws Exception if it times out, or an exception occurs on one
* of the ZKFC threads while waiting.
*/
public void waitForElectorState(int idx,
ActiveStandbyElector.State state) throws Exception {
ActiveStandbyElectorTestUtil.waitForElectorState(ctx,
getElector(idx), state);
}
/**
* Expire the ZK session of the given service. This requires
* (and asserts) that the given service be the current active.
* @throws NoNodeException if no service holds the lock
*/
public void expireActiveLockHolder(int idx)
throws NoNodeException {
Stat stat = new Stat();
byte[] data = zks.getZKDatabase().getData(
DummyZKFC.LOCK_ZNODE, stat, null);
assertArrayEquals(Ints.toByteArray(svcs[idx].index), data);
long session = stat.getEphemeralOwner();
LOG.info("Expiring svc " + idx + "'s zookeeper session " + session);
zks.closeSession(session);
}
/**
* Wait for the given HA service to become the active lock holder.
* If the passed svc is null, waits for there to be no active
* lock holder.
*/
public void waitForActiveLockHolder(Integer idx)
throws Exception {
DummyHAService svc = idx == null ? null : svcs[idx];
ActiveStandbyElectorTestUtil.waitForActiveLockData(ctx, zks,
DummyZKFC.SCOPED_PARENT_ZNODE,
(idx == null) ? null : Ints.toByteArray(svc.index));
}
/**
* Expires the ZK session associated with service 'fromIdx', and waits
* until service 'toIdx' takes over.
* @throws Exception if the target service does not become active
*/
public void expireAndVerifyFailover(int fromIdx, int toIdx)
throws Exception {
Preconditions.checkArgument(fromIdx != toIdx);
getElector(fromIdx).preventSessionReestablishmentForTests();
try {
expireActiveLockHolder(fromIdx);
waitForHAState(fromIdx, HAServiceState.STANDBY);
waitForHAState(toIdx, HAServiceState.ACTIVE);
} finally {
getElector(fromIdx).allowSessionReestablishmentForTests();
}
}
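  /**
   * Illustrative sketch, not part of the original harness: a complete
   * controlled-failover round trip using the helpers above. The method
   * name failoverRoundTrip is hypothetical.
   */
  public void failoverRoundTrip() throws Exception {
    start();                        // svc 0 enters ACTIVE, svc 1 STANDBY
    expireAndVerifyFailover(0, 1);  // service 1 takes over
    expireAndVerifyFailover(1, 0);  // and fails back again
    stop();
  }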
/**
* Test-thread which runs a ZK Failover Controller corresponding
* to a given dummy service.
*/
private class DummyZKFCThread extends TestingThread {
private final DummyZKFC zkfc;
public DummyZKFCThread(TestContext ctx, DummyHAService svc) {
super(ctx);
this.zkfc = new DummyZKFC(conf, svc);
}
@Override
public void doWork() throws Exception {
try {
assertEquals(0, zkfc.run(new String[0]));
} catch (InterruptedException ie) {
// Interrupted by main thread, that's OK.
}
}
}
static class DummyZKFC extends ZKFailoverController {
private static final String DUMMY_CLUSTER = "dummy-cluster";
public static final String SCOPED_PARENT_ZNODE =
ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT + "/" +
DUMMY_CLUSTER;
private static final String LOCK_ZNODE =
SCOPED_PARENT_ZNODE + "/" + ActiveStandbyElector.LOCK_FILENAME;
private final DummyHAService localTarget;
public DummyZKFC(Configuration conf, DummyHAService localTarget) {
super(conf, localTarget);
this.localTarget = localTarget;
}
@Override
protected byte[] targetToData(HAServiceTarget target) {
return Ints.toByteArray(((DummyHAService)target).index);
}
@Override
protected HAServiceTarget dataToTarget(byte[] data) {
int index = Ints.fromByteArray(data);
return DummyHAService.getInstance(index);
}
@Override
protected void loginAsFCUser() throws IOException {
}
@Override
protected String getScopeInsideParentNode() {
return DUMMY_CLUSTER;
}
@Override
protected void checkRpcAdminAccess() throws AccessControlException {
}
@Override
protected InetSocketAddress getRpcAddressToBindTo() {
return new InetSocketAddress(0);
}
@Override
protected void initRPC() throws IOException {
super.initRPC();
localTarget.zkfcProxy = this.getRpcServerForTests();
}
@Override
protected PolicyProvider getPolicyProvider() {
return null;
}
}
}
| 10,346 | 30.935185 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestNodeFencer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import static org.junit.Assert.*;
import java.net.InetSocketAddress;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Shell;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import com.google.common.collect.Lists;
public class TestNodeFencer {
private HAServiceTarget MOCK_TARGET;
// Fencer shell commands that always return true on Unix and Windows
// respectively. Lacking the POSIX 'true' command on Windows, we use
// the batch command 'rem'.
private static final String FENCER_TRUE_COMMAND_UNIX = "shell(true)";
private static final String FENCER_TRUE_COMMAND_WINDOWS = "shell(rem)";
@Before
public void clearMockState() {
AlwaysSucceedFencer.fenceCalled = 0;
AlwaysSucceedFencer.callArgs.clear();
AlwaysFailFencer.fenceCalled = 0;
AlwaysFailFencer.callArgs.clear();
MOCK_TARGET = Mockito.mock(HAServiceTarget.class);
Mockito.doReturn("my mock").when(MOCK_TARGET).toString();
Mockito.doReturn(new InetSocketAddress("host", 1234))
.when(MOCK_TARGET).getAddress();
}
private static String getFencerTrueCommand() {
return Shell.WINDOWS ?
FENCER_TRUE_COMMAND_WINDOWS : FENCER_TRUE_COMMAND_UNIX;
}
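// Sketch of the fencing configuration grammar exercised below (inferred from
// these tests rather than spelled out in this file): one method per line,
// "methodName(optionalArgs)", with '#' comments and blank lines ignored, e.g.
//   shell(/path/to/my-script.sh --force)   <- hypothetical script path
//   sshfence(admin:2222)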
@Test
public void testSingleFencer() throws BadFencingConfigurationException {
NodeFencer fencer = setupFencer(
AlwaysSucceedFencer.class.getName() + "(foo)");
assertTrue(fencer.fence(MOCK_TARGET));
assertEquals(1, AlwaysSucceedFencer.fenceCalled);
assertSame(MOCK_TARGET, AlwaysSucceedFencer.fencedSvc);
assertEquals("foo", AlwaysSucceedFencer.callArgs.get(0));
}
@Test
public void testMultipleFencers() throws BadFencingConfigurationException {
NodeFencer fencer = setupFencer(
AlwaysSucceedFencer.class.getName() + "(foo)\n" +
AlwaysSucceedFencer.class.getName() + "(bar)\n");
assertTrue(fencer.fence(MOCK_TARGET));
// Only one call, since the first fencer succeeds
assertEquals(1, AlwaysSucceedFencer.fenceCalled);
assertEquals("foo", AlwaysSucceedFencer.callArgs.get(0));
}
@Test
public void testWhitespaceAndCommentsInConfig()
throws BadFencingConfigurationException {
NodeFencer fencer = setupFencer(
"\n" +
" # the next one will always fail\n" +
" " + AlwaysFailFencer.class.getName() + "(foo) # <- fails\n" +
AlwaysSucceedFencer.class.getName() + "(bar) \n");
assertTrue(fencer.fence(MOCK_TARGET));
// One call to each, since top fencer fails
assertEquals(1, AlwaysFailFencer.fenceCalled);
assertSame(MOCK_TARGET, AlwaysFailFencer.fencedSvc);
assertEquals(1, AlwaysSucceedFencer.fenceCalled);
assertSame(MOCK_TARGET, AlwaysSucceedFencer.fencedSvc);
assertEquals("foo", AlwaysFailFencer.callArgs.get(0));
assertEquals("bar", AlwaysSucceedFencer.callArgs.get(0));
}
@Test
public void testArglessFencer() throws BadFencingConfigurationException {
NodeFencer fencer = setupFencer(
AlwaysSucceedFencer.class.getName());
assertTrue(fencer.fence(MOCK_TARGET));
// One call, with no argument configured
assertEquals(1, AlwaysSucceedFencer.fenceCalled);
assertSame(MOCK_TARGET, AlwaysSucceedFencer.fencedSvc);
assertNull(AlwaysSucceedFencer.callArgs.get(0));
}
@Test
public void testShortNameShell() throws BadFencingConfigurationException {
NodeFencer fencer = setupFencer(getFencerTrueCommand());
assertTrue(fencer.fence(MOCK_TARGET));
}
@Test
public void testShortNameSsh() throws BadFencingConfigurationException {
NodeFencer fencer = setupFencer("sshfence");
assertFalse(fencer.fence(MOCK_TARGET));
}
@Test
public void testShortNameSshWithUser() throws BadFencingConfigurationException {
NodeFencer fencer = setupFencer("sshfence(user)");
assertFalse(fencer.fence(MOCK_TARGET));
}
@Test
public void testShortNameSshWithPort() throws BadFencingConfigurationException {
NodeFencer fencer = setupFencer("sshfence(:123)");
assertFalse(fencer.fence(MOCK_TARGET));
}
@Test
public void testShortNameSshWithUserPort() throws BadFencingConfigurationException {
NodeFencer fencer = setupFencer("sshfence(user:123)");
assertFalse(fencer.fence(MOCK_TARGET));
}
public static NodeFencer setupFencer(String confStr)
throws BadFencingConfigurationException {
System.err.println("Testing configuration:\n" + confStr);
Configuration conf = new Configuration();
return new NodeFencer(conf, confStr);
}
/**
* Mock fencing method that always returns true
*/
public static class AlwaysSucceedFencer extends Configured
implements FenceMethod {
static int fenceCalled = 0;
static HAServiceTarget fencedSvc;
static List<String> callArgs = Lists.newArrayList();
@Override
public boolean tryFence(HAServiceTarget target, String args) {
fencedSvc = target;
callArgs.add(args);
fenceCalled++;
return true;
}
@Override
public void checkArgs(String args) {
}
public static HAServiceTarget getLastFencedService() {
return fencedSvc;
}
}
/**
* Identical mock to above, except always returns false
*/
public static class AlwaysFailFencer extends Configured
implements FenceMethod {
static int fenceCalled = 0;
static HAServiceTarget fencedSvc;
static List<String> callArgs = Lists.newArrayList();
@Override
public boolean tryFence(HAServiceTarget target, String args) {
fencedSvc = target;
callArgs.add(args);
fenceCalled++;
return false;
}
@Override
public void checkArgs(String args) {
}
}
}
| 6,588 | 32.446701 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHealthMonitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import static org.junit.Assert.*;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HealthMonitor.Callback;
import org.apache.hadoop.ha.HealthMonitor.State;
import org.apache.hadoop.util.Time;
import org.junit.Before;
import org.junit.Test;
public class TestHealthMonitor {
private static final Log LOG = LogFactory.getLog(
TestHealthMonitor.class);
/** How many times createProxy has been called. */
private AtomicInteger createProxyCount = new AtomicInteger(0);
private volatile boolean throwOOMEOnCreate = false;
private HealthMonitor hm;
private DummyHAService svc;
@Before
public void setupHM() throws InterruptedException, IOException {
Configuration conf = new Configuration();
conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
conf.setInt(CommonConfigurationKeys.HA_HM_CHECK_INTERVAL_KEY, 50);
conf.setInt(CommonConfigurationKeys.HA_HM_CONNECT_RETRY_INTERVAL_KEY, 50);
conf.setInt(CommonConfigurationKeys.HA_HM_SLEEP_AFTER_DISCONNECT_KEY, 50);
svc = new DummyHAService(HAServiceState.ACTIVE,
new InetSocketAddress("0.0.0.0", 0), true);
hm = new HealthMonitor(conf, svc) {
@Override
protected HAServiceProtocol createProxy() throws IOException {
createProxyCount.incrementAndGet();
if (throwOOMEOnCreate) {
throw new OutOfMemoryError("oome");
}
return super.createProxy();
}
};
LOG.info("Starting health monitor");
hm.start();
LOG.info("Waiting for HEALTHY signal");
waitForState(hm, HealthMonitor.State.SERVICE_HEALTHY);
}
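// A quick map of the HealthMonitor.State values these tests drive (a summary
// of the semantics implied by the assertions below):
//   SERVICE_HEALTHY        - monitorHealth() succeeded
//   SERVICE_UNHEALTHY      - monitorHealth() threw HealthCheckFailedException
//   SERVICE_NOT_RESPONDING - the RPC failed, e.g. the node is unreachable
//   HEALTH_MONITOR_FAILED  - the monitor thread itself died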
@Test(timeout=15000)
public void testMonitor() throws Exception {
LOG.info("Mocking bad health check, waiting for UNHEALTHY");
svc.isHealthy = false;
waitForState(hm, HealthMonitor.State.SERVICE_UNHEALTHY);
LOG.info("Returning to healthy state, waiting for HEALTHY");
svc.isHealthy = true;
waitForState(hm, HealthMonitor.State.SERVICE_HEALTHY);
LOG.info("Returning an IOException, as if node went down");
// should expect many rapid retries
int countBefore = createProxyCount.get();
svc.actUnreachable = true;
waitForState(hm, HealthMonitor.State.SERVICE_NOT_RESPONDING);
// Should retry several times
while (createProxyCount.get() < countBefore + 3) {
Thread.sleep(10);
}
LOG.info("Returning to healthy state, waiting for HEALTHY");
svc.actUnreachable = false;
waitForState(hm, HealthMonitor.State.SERVICE_HEALTHY);
hm.shutdown();
hm.join();
assertFalse(hm.isAlive());
}
/**
* Test that the proper state is propagated when the health monitor
* sees an uncaught exception in its thread.
*/
@Test(timeout=15000)
public void testHealthMonitorDies() throws Exception {
LOG.info("Mocking RTE in health monitor, waiting for FAILED");
throwOOMEOnCreate = true;
svc.actUnreachable = true;
waitForState(hm, HealthMonitor.State.HEALTH_MONITOR_FAILED);
hm.shutdown();
hm.join();
assertFalse(hm.isAlive());
}
/**
* Test that, if the callback throws an RTE, this will terminate the
* health monitor and thus change its state to FAILED
* @throws Exception
*/
@Test(timeout=15000)
public void testCallbackThrowsRTE() throws Exception {
hm.addCallback(new Callback() {
@Override
public void enteredState(State newState) {
throw new RuntimeException("Injected RTE");
}
});
LOG.info("Mocking bad health check, waiting for UNHEALTHY");
svc.isHealthy = false;
waitForState(hm, HealthMonitor.State.HEALTH_MONITOR_FAILED);
}
private void waitForState(HealthMonitor hm, State state)
throws InterruptedException {
long st = Time.now();
while (Time.now() - st < 2000) {
if (hm.getHealthState() == state) {
return;
}
Thread.sleep(50);
}
assertEquals(state, hm.getHealthState());
}
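// waitForState is a bounded poll: it returns as soon as the state matches,
// and otherwise falls through to a final assertEquals after ~2s, so a
// timeout still produces a descriptive JUnit failure rather than a hang.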
}
| 5,134 | 32.782895 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
import org.apache.hadoop.ha.TestNodeFencer.AlwaysFailFencer;
import static org.apache.hadoop.ha.TestNodeFencer.setupFencer;
import org.apache.hadoop.security.AccessControlException;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.stubbing.answers.ThrowsException;
import static org.junit.Assert.*;
public class TestFailoverController {
private InetSocketAddress svc1Addr = new InetSocketAddress("svc1", 1234);
private InetSocketAddress svc2Addr = new InetSocketAddress("svc2", 5678);
private Configuration conf = new Configuration();
HAServiceStatus STATE_NOT_READY = new HAServiceStatus(HAServiceState.STANDBY)
.setNotReadyToBecomeActive("injected not ready");
@Test
public void testFailoverAndFailback() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
AlwaysSucceedFencer.fenceCalled = 0;
doFailover(svc1, svc2, false, false);
assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.ACTIVE, svc2.state);
AlwaysSucceedFencer.fenceCalled = 0;
doFailover(svc2, svc1, false, false);
assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
assertEquals(HAServiceState.ACTIVE, svc1.state);
assertEquals(HAServiceState.STANDBY, svc2.state);
}
@Test
public void testFailoverFromStandbyToStandby() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.STANDBY, svc1Addr);
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
doFailover(svc1, svc2, false, false);
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.ACTIVE, svc2.state);
}
@Test
public void testFailoverFromActiveToActive() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
DummyHAService svc2 = new DummyHAService(HAServiceState.ACTIVE, svc2Addr);
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
doFailover(svc1, svc2, false, false);
fail("Can't failover to an already active service");
} catch (FailoverFailedException ffe) {
// Expected
}
assertEquals(HAServiceState.ACTIVE, svc1.state);
assertEquals(HAServiceState.ACTIVE, svc2.state);
}
@Test
public void testFailoverWithoutPermission() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
Mockito.doThrow(new AccessControlException("Access denied"))
.when(svc1.proxy).getServiceStatus();
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
Mockito.doThrow(new AccessControlException("Access denied"))
.when(svc2.proxy).getServiceStatus();
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
doFailover(svc1, svc2, false, false);
fail("Can't failover when access is denied");
} catch (FailoverFailedException ffe) {
assertTrue(ffe.getCause().getMessage().contains("Access denied"));
}
}
@Test
public void testFailoverToUnreadyService() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
Mockito.doReturn(STATE_NOT_READY).when(svc2.proxy)
.getServiceStatus();
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
doFailover(svc1, svc2, false, false);
fail("Can't failover to a service that's not ready");
} catch (FailoverFailedException ffe) {
// Expected
if (!ffe.getMessage().contains("injected not ready")) {
throw ffe;
}
}
assertEquals(HAServiceState.ACTIVE, svc1.state);
assertEquals(HAServiceState.STANDBY, svc2.state);
// Forcing it means we ignore readyToBecomeActive
doFailover(svc1, svc2, false, true);
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.ACTIVE, svc2.state);
}
@Test
public void testFailoverToUnhealthyServiceFailsAndFailsback() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
Mockito.doThrow(new HealthCheckFailedException("Failed!"))
.when(svc2.proxy).monitorHealth();
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
doFailover(svc1, svc2, false, false);
fail("Failover to unhealthy service");
} catch (FailoverFailedException ffe) {
// Expected
}
assertEquals(HAServiceState.ACTIVE, svc1.state);
assertEquals(HAServiceState.STANDBY, svc2.state);
}
@Test
public void testFailoverFromFaultyServiceSucceeds() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
Mockito.doThrow(new ServiceFailedException("Failed!"))
.when(svc1.proxy).transitionToStandby(anyReqInfo());
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
AlwaysSucceedFencer.fenceCalled = 0;
try {
doFailover(svc1, svc2, false, false);
} catch (FailoverFailedException ffe) {
fail("Faulty active prevented failover");
}
// svc1 still thinks it's active, that's OK, it was fenced
assertEquals(1, AlwaysSucceedFencer.fenceCalled);
assertSame(svc1, AlwaysSucceedFencer.fencedSvc);
assertEquals(HAServiceState.ACTIVE, svc1.state);
assertEquals(HAServiceState.ACTIVE, svc2.state);
}
@Test
public void testFailoverFromFaultyServiceFencingFailure() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
Mockito.doThrow(new ServiceFailedException("Failed!"))
.when(svc1.proxy).transitionToStandby(anyReqInfo());
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
svc1.fencer = svc2.fencer = setupFencer(AlwaysFailFencer.class.getName());
AlwaysFailFencer.fenceCalled = 0;
try {
doFailover(svc1, svc2, false, false);
fail("Failed over even though fencing failed");
} catch (FailoverFailedException ffe) {
// Expected
}
assertEquals(1, AlwaysFailFencer.fenceCalled);
assertSame(svc1, AlwaysFailFencer.fencedSvc);
assertEquals(HAServiceState.ACTIVE, svc1.state);
assertEquals(HAServiceState.STANDBY, svc2.state);
}
@Test
public void testFencingFailureDuringFailover() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
svc1.fencer = svc2.fencer = setupFencer(AlwaysFailFencer.class.getName());
AlwaysFailFencer.fenceCalled = 0;
try {
doFailover(svc1, svc2, true, false);
fail("Failed over even though fencing requested and failed");
} catch (FailoverFailedException ffe) {
// Expected
}
// If fencing was requested and it failed we don't try to make
// svc2 active anyway, and we don't failback to svc1.
assertEquals(1, AlwaysFailFencer.fenceCalled);
assertSame(svc1, AlwaysFailFencer.fencedSvc);
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.STANDBY, svc2.state);
}
@Test
public void testFailoverFromNonExistentServiceWithFencer() throws Exception {
DummyHAService svc1 = spy(new DummyHAService(null, svc1Addr));
// Getting a proxy to a dead server will throw IOException on call,
// not on creation of the proxy.
HAServiceProtocol errorThrowingProxy = Mockito.mock(HAServiceProtocol.class,
Mockito.withSettings()
.defaultAnswer(new ThrowsException(
new IOException("Could not connect to host")))
.extraInterfaces(Closeable.class));
Mockito.doNothing().when((Closeable)errorThrowingProxy).close();
Mockito.doReturn(errorThrowingProxy).when(svc1).getProxy(
Mockito.<Configuration>any(),
Mockito.anyInt());
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
doFailover(svc1, svc2, false, false);
} catch (FailoverFailedException ffe) {
fail("Non-existant active prevented failover");
}
// Verify that the proxy created to try to make it go to standby
// gracefully used the right rpc timeout
Mockito.verify(svc1).getProxy(
Mockito.<Configuration>any(),
Mockito.eq(
CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT));
// Don't check svc1 because we can't reach it, but that's OK, it's been fenced.
assertEquals(HAServiceState.ACTIVE, svc2.state);
}
@Test
public void testFailoverToNonExistentServiceFails() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
DummyHAService svc2 = spy(new DummyHAService(null, svc2Addr));
Mockito.doThrow(new IOException("Failed to connect"))
.when(svc2).getProxy(Mockito.<Configuration>any(),
Mockito.anyInt());
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
doFailover(svc1, svc2, false, false);
fail("Failed over to a non-existant standby");
} catch (FailoverFailedException ffe) {
// Expected
}
assertEquals(HAServiceState.ACTIVE, svc1.state);
}
@Test
public void testFailoverToFaultyServiceFailsbackOK() throws Exception {
DummyHAService svc1 = spy(new DummyHAService(HAServiceState.ACTIVE, svc1Addr));
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
Mockito.doThrow(new ServiceFailedException("Failed!"))
.when(svc2.proxy).transitionToActive(anyReqInfo());
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
doFailover(svc1, svc2, false, false);
fail("Failover to already active service");
} catch (FailoverFailedException ffe) {
// Expected
}
// svc1 went standby then back to active
verify(svc1.proxy).transitionToStandby(anyReqInfo());
verify(svc1.proxy).transitionToActive(anyReqInfo());
assertEquals(HAServiceState.ACTIVE, svc1.state);
assertEquals(HAServiceState.STANDBY, svc2.state);
}
@Test
public void testWeDontFailbackIfActiveWasFenced() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
Mockito.doThrow(new ServiceFailedException("Failed!"))
.when(svc2.proxy).transitionToActive(anyReqInfo());
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
doFailover(svc1, svc2, true, false);
fail("Failed over to service that won't transition to active");
} catch (FailoverFailedException ffe) {
// Expected
}
// We failed to failover and did not failback because we fenced
// svc1 (we forced it), therefore svc1 and svc2 should be standby.
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.STANDBY, svc2.state);
}
@Test
public void testWeFenceOnFailbackIfTransitionToActiveFails() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
Mockito.doThrow(new ServiceFailedException("Failed!"))
.when(svc2.proxy).transitionToActive(anyReqInfo());
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
AlwaysSucceedFencer.fenceCalled = 0;
try {
doFailover(svc1, svc2, false, false);
fail("Failed over to service that won't transition to active");
} catch (FailoverFailedException ffe) {
// Expected
}
// We failed to failover. We did not fence svc1 because it cooperated
// and we didn't force it, so we failed back to svc1 and fenced svc2.
// Note svc2 still thinks it's active, that's OK, we fenced it.
assertEquals(HAServiceState.ACTIVE, svc1.state);
assertEquals(1, AlwaysSucceedFencer.fenceCalled);
assertSame(svc2, AlwaysSucceedFencer.fencedSvc);
}
private StateChangeRequestInfo anyReqInfo() {
return Mockito.<StateChangeRequestInfo>any();
}
@Test
public void testFailureToFenceOnFailbackFailsTheFailback() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
Mockito.doThrow(new IOException("Failed!"))
.when(svc2.proxy).transitionToActive(anyReqInfo());
svc1.fencer = svc2.fencer = setupFencer(AlwaysFailFencer.class.getName());
AlwaysFailFencer.fenceCalled = 0;
try {
doFailover(svc1, svc2, false, false);
fail("Failed over to service that won't transition to active");
} catch (FailoverFailedException ffe) {
// Expected
}
// We did not fence svc1 because it cooperated and we didn't force it,
// we failed to failover so we fenced svc2, we failed to fence svc2
// so we did not failback to svc1, ie it's still standby.
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(1, AlwaysFailFencer.fenceCalled);
assertSame(svc2, AlwaysFailFencer.fencedSvc);
}
@Test
public void testFailbackToFaultyServiceFails() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
Mockito.doThrow(new ServiceFailedException("Failed!"))
.when(svc1.proxy).transitionToActive(anyReqInfo());
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
Mockito.doThrow(new ServiceFailedException("Failed!"))
.when(svc2.proxy).transitionToActive(anyReqInfo());
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
try {
doFailover(svc1, svc2, false, false);
fail("Failover to already active service");
} catch (FailoverFailedException ffe) {
// Expected
}
assertEquals(HAServiceState.STANDBY, svc1.state);
assertEquals(HAServiceState.STANDBY, svc2.state);
}
@Test
public void testSelfFailoverFails() throws Exception {
DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
AlwaysSucceedFencer.fenceCalled = 0;
try {
doFailover(svc1, svc1, false, false);
fail("Can't failover to yourself");
} catch (FailoverFailedException ffe) {
// Expected
}
assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
assertEquals(HAServiceState.ACTIVE, svc1.state);
try {
doFailover(svc2, svc2, false, false);
fail("Can't failover to yourself");
} catch (FailoverFailedException ffe) {
// Expected
}
assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
assertEquals(HAServiceState.STANDBY, svc2.state);
}
private void doFailover(HAServiceTarget tgt1, HAServiceTarget tgt2,
boolean forceFence, boolean forceActive) throws FailoverFailedException {
FailoverController fc = new FailoverController(conf,
RequestSource.REQUEST_BY_USER);
fc.failover(tgt1, tgt2, forceFence, forceActive);
}
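// Minimal sketch of driving FailoverController directly, mirroring the
// helper above (the target names are hypothetical):
//   FailoverController fc =
//       new FailoverController(conf, RequestSource.REQUEST_BY_USER);
//   fc.failover(fromTarget, toTarget,
//       false /* forceFence */, false /* forceActive */);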
}
| 17,378 | 38.951724 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummySharedResource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import org.junit.Assert;
/**
* A fake shared resource, for use in automatic failover testing.
* This simulates a real shared resource like a shared edit log.
* When the {@link DummyHAService} instances change state or get
* fenced, they notify the shared resource, which asserts that
* we never have two HA services who think they're holding the
* resource at the same time.
*/
public class DummySharedResource {
private DummyHAService holder = null;
private int violations = 0;
public synchronized void take(DummyHAService newHolder) {
if (holder == null || holder == newHolder) {
holder = newHolder;
} else {
violations++;
throw new IllegalStateException("already held by: " + holder);
}
}
public synchronized void release(DummyHAService oldHolder) {
if (holder == oldHolder) {
holder = null;
}
}
public synchronized void assertNoViolations() {
Assert.assertEquals(0, violations);
}
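// Illustrative sequence (hypothetical services a and b): take(a); release(a);
// take(b) is fine, but take(a) followed by take(b) without a release records
// a violation and throws -- exactly the split-brain condition the ZKFC tests
// must never trigger.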
}
| 1,806 | 33.09434 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/DummyHAService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import com.google.protobuf.BlockingService;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.mockito.Mockito;
import com.google.common.collect.Lists;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HA_HM_RPC_TIMEOUT_DEFAULT;
/**
* Test-only implementation of {@link HAServiceTarget}, which returns
* a mock implementation.
*/
class DummyHAService extends HAServiceTarget {
public static final Log LOG = LogFactory.getLog(DummyHAService.class);
private static final String DUMMY_FENCE_KEY = "dummy.fence.key";
volatile HAServiceState state;
HAServiceProtocol proxy;
ZKFCProtocol zkfcProxy = null;
NodeFencer fencer;
InetSocketAddress address;
boolean isHealthy = true;
boolean actUnreachable = false;
boolean failToBecomeActive, failToBecomeStandby, failToFence;
DummySharedResource sharedResource;
public int fenceCount = 0;
public int activeTransitionCount = 0;
boolean testWithProtoBufRPC = false;
static ArrayList<DummyHAService> instances = Lists.newArrayList();
int index;
DummyHAService(HAServiceState state, InetSocketAddress address) {
this(state, address, false);
}
DummyHAService(HAServiceState state, InetSocketAddress address,
boolean testWithProtoBufRPC) {
this.state = state;
this.testWithProtoBufRPC = testWithProtoBufRPC;
if (testWithProtoBufRPC) {
this.address = startAndGetRPCServerAddress(address);
} else {
this.address = address;
}
Configuration conf = new Configuration();
this.proxy = makeMock(conf, HA_HM_RPC_TIMEOUT_DEFAULT);
try {
conf.set(DUMMY_FENCE_KEY, DummyFencer.class.getName());
this.fencer = Mockito.spy(
NodeFencer.create(conf, DUMMY_FENCE_KEY));
} catch (BadFencingConfigurationException e) {
throw new RuntimeException(e);
}
synchronized (instances) {
instances.add(this);
this.index = instances.size();
}
}
public void setSharedResource(DummySharedResource rsrc) {
this.sharedResource = rsrc;
}
private InetSocketAddress startAndGetRPCServerAddress(InetSocketAddress serverAddress) {
Configuration conf = new Configuration();
try {
RPC.setProtocolEngine(conf,
HAServiceProtocolPB.class, ProtobufRpcEngine.class);
HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator =
new HAServiceProtocolServerSideTranslatorPB(new MockHAProtocolImpl());
BlockingService haPbService = HAServiceProtocolService
.newReflectiveBlockingService(haServiceProtocolXlator);
Server server = new RPC.Builder(conf)
.setProtocol(HAServiceProtocolPB.class)
.setInstance(haPbService)
.setBindAddress(serverAddress.getHostName())
.setPort(serverAddress.getPort()).build();
server.start();
return NetUtils.getConnectAddress(server);
} catch (IOException e) {
return null;
}
}
private HAServiceProtocol makeMock(Configuration conf, int timeoutMs) {
HAServiceProtocol service;
if (!testWithProtoBufRPC) {
service = new MockHAProtocolImpl();
} else {
try {
service = super.getProxy(conf, timeoutMs);
} catch (IOException e) {
return null;
}
}
return Mockito.spy(service);
}
@Override
public InetSocketAddress getAddress() {
return address;
}
@Override
public InetSocketAddress getZKFCAddress() {
return null;
}
@Override
public HAServiceProtocol getProxy(Configuration conf, int timeout)
throws IOException {
if (testWithProtoBufRPC) {
proxy = makeMock(conf, timeout);
}
return proxy;
}
@Override
public ZKFCProtocol getZKFCProxy(Configuration conf, int timeout)
throws IOException {
assert zkfcProxy != null;
return zkfcProxy;
}
@Override
public NodeFencer getFencer() {
return fencer;
}
@Override
public void checkFencingConfigured() throws BadFencingConfigurationException {
}
@Override
public boolean isAutoFailoverEnabled() {
return true;
}
@Override
public String toString() {
return "DummyHAService #" + index;
}
public static HAServiceTarget getInstance(int serial) {
return instances.get(serial - 1);
}
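// Note: 'index' is 1-based (instances.size() after the add), which is why
// getInstance subtracts 1; DummyZKFC.targetToData/dataToTarget round-trip
// this same value through the lock znode.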
private class MockHAProtocolImpl implements
HAServiceProtocol, Closeable {
@Override
public void monitorHealth() throws HealthCheckFailedException,
AccessControlException, IOException {
checkUnreachable();
if (!isHealthy) {
throw new HealthCheckFailedException("not healthy");
}
}
@Override
public void transitionToActive(StateChangeRequestInfo req) throws ServiceFailedException,
AccessControlException, IOException {
activeTransitionCount++;
checkUnreachable();
if (failToBecomeActive) {
throw new ServiceFailedException("injected failure");
}
if (sharedResource != null) {
sharedResource.take(DummyHAService.this);
}
state = HAServiceState.ACTIVE;
}
@Override
public void transitionToStandby(StateChangeRequestInfo req) throws ServiceFailedException,
AccessControlException, IOException {
checkUnreachable();
if (failToBecomeStandby) {
throw new ServiceFailedException("injected failure");
}
if (sharedResource != null) {
sharedResource.release(DummyHAService.this);
}
state = HAServiceState.STANDBY;
}
@Override
public HAServiceStatus getServiceStatus() throws IOException {
checkUnreachable();
HAServiceStatus ret = new HAServiceStatus(state);
if (state == HAServiceState.STANDBY || state == HAServiceState.ACTIVE) {
ret.setReadyToBecomeActive();
}
return ret;
}
private void checkUnreachable() throws IOException {
if (actUnreachable) {
throw new IOException("Connection refused (fake)");
}
}
@Override
public void close() throws IOException {
}
}
public static class DummyFencer implements FenceMethod {
@Override
public void checkArgs(String args) throws BadFencingConfigurationException {
}
@Override
public boolean tryFence(HAServiceTarget target, String args)
throws BadFencingConfigurationException {
LOG.info("tryFence(" + target + ")");
DummyHAService svc = (DummyHAService)target;
synchronized (svc) {
svc.fenceCount++;
}
if (svc.failToFence) {
LOG.info("Injected failure to fence");
return false;
}
svc.sharedResource.release(svc);
return true;
}
}
}
| 8,196 | 29.585821 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ActiveStandbyElectorTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.data.Stat;
import org.apache.zookeeper.server.ZooKeeperServer;
public abstract class ActiveStandbyElectorTestUtil {
private static final Log LOG = LogFactory.getLog(
ActiveStandbyElectorTestUtil.class);
private static final long LOG_INTERVAL_MS = 500;
public static void waitForActiveLockData(TestContext ctx,
ZooKeeperServer zks, String parentDir, byte[] activeData)
throws Exception {
long st = Time.now();
long lastPrint = st;
while (true) {
if (ctx != null) {
ctx.checkException();
}
try {
Stat stat = new Stat();
byte[] data = zks.getZKDatabase().getData(
parentDir + "/" +
ActiveStandbyElector.LOCK_FILENAME, stat, null);
if (activeData != null &&
Arrays.equals(activeData, data)) {
return;
}
if (Time.now() > lastPrint + LOG_INTERVAL_MS) {
LOG.info("Cur data: " + StringUtils.byteToHexString(data));
lastPrint = Time.now();
}
} catch (NoNodeException nne) {
if (activeData == null) {
return;
}
if (Time.now() > lastPrint + LOG_INTERVAL_MS) {
LOG.info("Cur data: no node");
lastPrint = Time.now();
}
}
Thread.sleep(50);
}
}
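// Usage sketch: to wait until service #2 holds the lock under a parent znode
// (names assumed from the callers in this suite):
//   ActiveStandbyElectorTestUtil.waitForActiveLockData(ctx, zks,
//       parentZnode, Ints.toByteArray(2));
// Passing null for activeData instead waits until no lock znode exists.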
public static void waitForElectorState(TestContext ctx,
ActiveStandbyElector elector,
ActiveStandbyElector.State state) throws Exception {
while (elector.getStateForTests() != state) {
if (ctx != null) {
ctx.checkException();
}
Thread.sleep(50);
}
}
}
| 2,779 | 32.493976 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ZKFCTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import org.apache.hadoop.test.MultithreadedTestUtil;
public class ZKFCTestUtil {
public static void waitForHealthState(ZKFailoverController zkfc,
HealthMonitor.State state,
MultithreadedTestUtil.TestContext ctx) throws Exception {
while (zkfc.getLastHealthState() != state) {
if (ctx != null) {
ctx.checkException();
}
Thread.sleep(50);
}
}
}
| 1,235 | 34.314286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestZKFailoverController.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import static org.junit.Assert.*;
import java.security.NoSuchAlgorithmException;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HealthMonitor.State;
import org.apache.hadoop.ha.MiniZKFCCluster.DummyZKFC;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
public class TestZKFailoverController extends ClientBaseWithFixes {
private Configuration conf;
private MiniZKFCCluster cluster;
// Set up ZK digest-based credentials for the purposes of the tests,
// to make sure all of our functionality works with auth and ACLs
// present.
private static final String DIGEST_USER_PASS = "test-user:test-password";
private static final String TEST_AUTH_GOOD =
"digest:" + DIGEST_USER_PASS;
private static final String DIGEST_USER_HASH;
static {
try {
DIGEST_USER_HASH = DigestAuthenticationProvider.generateDigest(
DIGEST_USER_PASS);
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
}
private static final String TEST_ACL =
"digest:" + DIGEST_USER_HASH + ":rwcda";
static {
((Log4JLogger)ActiveStandbyElector.LOG).getLogger().setLevel(Level.ALL);
}
@Before
public void setupConfAndServices() {
conf = new Configuration();
conf.set(ZKFailoverController.ZK_ACL_KEY, TEST_ACL);
conf.set(ZKFailoverController.ZK_AUTH_KEY, TEST_AUTH_GOOD);
conf.set(ZKFailoverController.ZK_QUORUM_KEY, hostPort);
this.cluster = new MiniZKFCCluster(conf, getServer(serverFactory));
}
/**
* Test that the various command lines for formatting the ZK directory
* function correctly.
*/
@Test(timeout=15000)
public void testFormatZK() throws Exception {
DummyHAService svc = cluster.getService(1);
// Run without formatting the base dir,
// should barf
assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,
runFC(svc));
// Format the base dir, should succeed
assertEquals(0, runFC(svc, "-formatZK"));
// Should fail to format if already formatted
assertEquals(ZKFailoverController.ERR_CODE_FORMAT_DENIED,
runFC(svc, "-formatZK", "-nonInteractive"));
// Unless '-force' is on
assertEquals(0, runFC(svc, "-formatZK", "-force"));
}
/**
* Test that if ZooKeeper is not running, the correct error
* code is returned.
*/
@Test(timeout=15000)
public void testNoZK() throws Exception {
stopServer();
DummyHAService svc = cluster.getService(1);
assertEquals(ZKFailoverController.ERR_CODE_NO_ZK,
runFC(svc));
}
@Test
public void testFormatOneClusterLeavesOtherClustersAlone() throws Exception {
DummyHAService svc = cluster.getService(1);
DummyZKFC zkfcInOtherCluster = new DummyZKFC(conf, cluster.getService(1)) {
@Override
protected String getScopeInsideParentNode() {
return "other-scope";
}
};
// Run without formatting the base dir,
// should barf
assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,
runFC(svc));
// Format the base dir, should succeed
assertEquals(0, runFC(svc, "-formatZK"));
// Run the other cluster without formatting, should barf because
// it uses a different parent znode
assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,
zkfcInOtherCluster.run(new String[]{}));
// Should succeed in formatting the second cluster
assertEquals(0, zkfcInOtherCluster.run(new String[]{"-formatZK"}));
// But should not have deleted the original base node from the first
// cluster
assertEquals(ZKFailoverController.ERR_CODE_FORMAT_DENIED,
runFC(svc, "-formatZK", "-nonInteractive"));
}
/**
* Test that automatic failover won't run against a target that hasn't
* explicitly enabled the feature.
*/
@Test(timeout=10000)
public void testWontRunWhenAutoFailoverDisabled() throws Exception {
DummyHAService svc = cluster.getService(1);
svc = Mockito.spy(svc);
Mockito.doReturn(false).when(svc).isAutoFailoverEnabled();
assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED,
runFC(svc, "-formatZK"));
assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED,
runFC(svc));
}
/**
* Test that, if ACLs are specified in the configuration, that
* it sets the ACLs when formatting the parent node.
*/
@Test(timeout=15000)
public void testFormatSetsAcls() throws Exception {
// Format the base dir, should succeed
DummyHAService svc = cluster.getService(1);
assertEquals(0, runFC(svc, "-formatZK"));
ZooKeeper otherClient = createClient();
try {
// client without auth should not be able to read it
Stat stat = new Stat();
otherClient.getData(ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT,
false, stat);
fail("Was able to read data without authenticating!");
} catch (KeeperException.NoAuthException nae) {
// expected
}
}
/**
* Test that the ZKFC won't run if fencing is not configured for the
* local service.
*/
@Test(timeout=15000)
public void testFencingMustBeConfigured() throws Exception {
DummyHAService svc = Mockito.spy(cluster.getService(0));
Mockito.doThrow(new BadFencingConfigurationException("no fencing"))
.when(svc).checkFencingConfigured();
// Format the base dir, should succeed
assertEquals(0, runFC(svc, "-formatZK"));
// Try to run the actual FC, should fail without a fencer
assertEquals(ZKFailoverController.ERR_CODE_NO_FENCER,
runFC(svc));
}
/**
* Test that, when the health monitor indicates bad health status,
* failover is triggered. Also ensures that graceful active->standby
* transition is used when possible, falling back to fencing when
* the graceful approach fails.
*/
@Test(timeout=15000)
public void testAutoFailoverOnBadHealth() throws Exception {
try {
cluster.start();
DummyHAService svc1 = cluster.getService(1);
LOG.info("Faking svc0 unhealthy, should failover to svc1");
cluster.setHealthy(0, false);
LOG.info("Waiting for svc0 to enter initializing state");
cluster.waitForHAState(0, HAServiceState.INITIALIZING);
cluster.waitForHAState(1, HAServiceState.ACTIVE);
LOG.info("Allowing svc0 to be healthy again, making svc1 unreachable " +
"and fail to gracefully go to standby");
cluster.setUnreachable(1, true);
cluster.setHealthy(0, true);
// Should fail back to svc0 at this point
cluster.waitForHAState(0, HAServiceState.ACTIVE);
// and fence svc1
Mockito.verify(svc1.fencer).fence(Mockito.same(svc1));
} finally {
cluster.stop();
}
}
/**
* Test that, if the active service unexpectedly reports a state other than
* the one the ZKFC last set (here, STANDBY while it still holds the lock),
* a failover to the other node is triggered.
*/
@Test(timeout=15000)
public void testAutoFailoverOnBadState() throws Exception {
try {
cluster.start();
DummyHAService svc0 = cluster.getService(0);
LOG.info("Faking svc0 to change the state, should failover to svc1");
svc0.state = HAServiceState.STANDBY;
// Should fail back to svc0 at this point
cluster.waitForHAState(1, HAServiceState.ACTIVE);
} finally {
cluster.stop();
}
}
@Test(timeout=15000)
public void testAutoFailoverOnLostZKSession() throws Exception {
try {
cluster.start();
// Expire svc0, it should fail over to svc1
cluster.expireAndVerifyFailover(0, 1);
// Expire svc1, it should fail back to svc0
cluster.expireAndVerifyFailover(1, 0);
LOG.info("======= Running test cases second time to test " +
"re-establishment =========");
// Expire svc0, it should fail over to svc1
cluster.expireAndVerifyFailover(0, 1);
// Expire svc1, it should fail back to svc0
cluster.expireAndVerifyFailover(1, 0);
} finally {
cluster.stop();
}
}
/**
* Test that, if the standby node is unhealthy, it doesn't try to become
* active
*/
@Test(timeout=15000)
public void testDontFailoverToUnhealthyNode() throws Exception {
try {
cluster.start();
// Make svc1 unhealthy, and wait for its FC to notice the bad health.
cluster.setHealthy(1, false);
cluster.waitForHealthState(1, HealthMonitor.State.SERVICE_UNHEALTHY);
// Expire svc0
cluster.getElector(0).preventSessionReestablishmentForTests();
try {
cluster.expireActiveLockHolder(0);
LOG.info("Expired svc0's ZK session. Waiting a second to give svc1" +
" a chance to take the lock, if it is ever going to.");
Thread.sleep(1000);
// Ensure that no one holds the lock.
cluster.waitForActiveLockHolder(null);
} finally {
LOG.info("Allowing svc0's elector to re-establish its connection");
cluster.getElector(0).allowSessionReestablishmentForTests();
}
// svc0 should get the lock again
cluster.waitForActiveLockHolder(0);
} finally {
cluster.stop();
}
}
/**
* Test that the ZKFC successfully quits the election when it fails to
* become active. This allows the old node to successfully fail back.
*/
@Test(timeout=15000)
public void testBecomingActiveFails() throws Exception {
try {
cluster.start();
DummyHAService svc1 = cluster.getService(1);
LOG.info("Making svc1 fail to become active");
cluster.setFailToBecomeActive(1, true);
LOG.info("Faking svc0 unhealthy, should NOT successfully " +
"failover to svc1");
cluster.setHealthy(0, false);
cluster.waitForHealthState(0, State.SERVICE_UNHEALTHY);
cluster.waitForActiveLockHolder(null);
Mockito.verify(svc1.proxy, Mockito.timeout(2000).atLeastOnce())
.transitionToActive(Mockito.<StateChangeRequestInfo>any());
cluster.waitForHAState(0, HAServiceState.INITIALIZING);
cluster.waitForHAState(1, HAServiceState.STANDBY);
LOG.info("Faking svc0 healthy again, should go back to svc0");
cluster.setHealthy(0, true);
cluster.waitForHAState(0, HAServiceState.ACTIVE);
cluster.waitForHAState(1, HAServiceState.STANDBY);
cluster.waitForActiveLockHolder(0);
// Ensure that we can fail back to svc1 once it is able
// to become active (e.g the admin has restarted it)
LOG.info("Allowing svc1 to become active, expiring svc0");
svc1.failToBecomeActive = false;
cluster.expireAndVerifyFailover(0, 1);
} finally {
cluster.stop();
}
}
/**
* Test that, when ZooKeeper fails, the system remains in its
* current state, without triggering any failovers, and without
* causing the active node to enter standby state.
*/
@Test(timeout=15000)
public void testZooKeeperFailure() throws Exception {
try {
cluster.start();
// Record initial ZK sessions
long session0 = cluster.getElector(0).getZKSessionIdForTests();
long session1 = cluster.getElector(1).getZKSessionIdForTests();
LOG.info("====== Stopping ZK server");
stopServer();
waitForServerDown(hostPort, CONNECTION_TIMEOUT);
LOG.info("====== Waiting for services to enter NEUTRAL mode");
cluster.waitForElectorState(0,
ActiveStandbyElector.State.NEUTRAL);
cluster.waitForElectorState(1,
ActiveStandbyElector.State.NEUTRAL);
LOG.info("====== Checking that the services didn't change HA state");
assertEquals(HAServiceState.ACTIVE, cluster.getService(0).state);
assertEquals(HAServiceState.STANDBY, cluster.getService(1).state);
LOG.info("====== Restarting server");
startServer();
waitForServerUp(hostPort, CONNECTION_TIMEOUT);
// Nodes should go back to their original states, since they re-obtain
// the same sessions.
cluster.waitForElectorState(0, ActiveStandbyElector.State.ACTIVE);
cluster.waitForElectorState(1, ActiveStandbyElector.State.STANDBY);
// Check HA states didn't change.
cluster.waitForHAState(0, HAServiceState.ACTIVE);
cluster.waitForHAState(1, HAServiceState.STANDBY);
// Check they re-used the same sessions and didn't spuriously reconnect
assertEquals(session0,
cluster.getElector(0).getZKSessionIdForTests());
assertEquals(session1,
cluster.getElector(1).getZKSessionIdForTests());
} finally {
cluster.stop();
}
}
/**
* Test that the ZKFC can gracefully cede its active status.
*/
@Test(timeout=15000)
public void testCedeActive() throws Exception {
try {
cluster.start();
DummyZKFC zkfc = cluster.getZkfc(0);
// It should be in active to start.
assertEquals(ActiveStandbyElector.State.ACTIVE,
zkfc.getElectorForTests().getStateForTests());
// Ask it to cede active for 3 seconds. It should respond promptly
// (i.e. the RPC itself should not take 3 seconds!)
ZKFCProtocol proxy = zkfc.getLocalTarget().getZKFCProxy(conf, 5000);
long st = Time.now();
proxy.cedeActive(3000);
long et = Time.now();
assertTrue("RPC to cedeActive took " + (et - st) + " ms",
et - st < 1000);
// Should be in "INIT" state since it's not in the election
// at this point.
assertEquals(ActiveStandbyElector.State.INIT,
zkfc.getElectorForTests().getStateForTests());
// After the prescribed 3 seconds, should go into STANDBY state,
// since the other node in the cluster would have taken ACTIVE.
cluster.waitForElectorState(0, ActiveStandbyElector.State.STANDBY);
long et2 = Time.now();
assertTrue("Should take ~3 seconds to rejoin. Only took " + (et2 - et) +
"ms before rejoining.",
et2 - et > 2800);
} finally {
cluster.stop();
}
}
@Test(timeout=25000)
public void testGracefulFailover() throws Exception {
try {
cluster.start();
cluster.waitForActiveLockHolder(0);
cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
cluster.waitForActiveLockHolder(1);
cluster.getService(0).getZKFCProxy(conf, 5000).gracefulFailover();
cluster.waitForActiveLockHolder(0);
Thread.sleep(10000); // allow to quiesce
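// Expected counts: svc0 becomes active twice (the initial election plus the
// fail-back), svc1 exactly once (the first graceful failover), and a
// graceful handoff never fences either side.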
assertEquals(0, cluster.getService(0).fenceCount);
assertEquals(0, cluster.getService(1).fenceCount);
assertEquals(2, cluster.getService(0).activeTransitionCount);
assertEquals(1, cluster.getService(1).activeTransitionCount);
} finally {
cluster.stop();
}
}
@Test(timeout=15000)
public void testGracefulFailoverToUnhealthy() throws Exception {
try {
cluster.start();
cluster.waitForActiveLockHolder(0);
// Mark it unhealthy, wait for it to exit election
cluster.setHealthy(1, false);
cluster.waitForElectorState(1, ActiveStandbyElector.State.INIT);
// Ask for failover, it should fail, because it's unhealthy
try {
cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
fail("Did not fail to graceful failover to unhealthy service!");
} catch (ServiceFailedException sfe) {
GenericTestUtils.assertExceptionContains(
cluster.getService(1).toString() +
" is not currently healthy.", sfe);
}
} finally {
cluster.stop();
}
}
@Test(timeout=15000)
public void testGracefulFailoverFailBecomingActive() throws Exception {
try {
cluster.start();
cluster.waitForActiveLockHolder(0);
cluster.setFailToBecomeActive(1, true);
// Ask for failover, it should fail and report back to user.
try {
cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
fail("Did not fail to graceful failover when target failed " +
"to become active!");
} catch (ServiceFailedException sfe) {
GenericTestUtils.assertExceptionContains(
"Couldn't make " + cluster.getService(1) + " active", sfe);
GenericTestUtils.assertExceptionContains(
"injected failure", sfe);
}
// No fencing
assertEquals(0, cluster.getService(0).fenceCount);
assertEquals(0, cluster.getService(1).fenceCount);
// Service 0 should go back to being active after the failed failover
cluster.waitForActiveLockHolder(0);
} finally {
cluster.stop();
}
}
@Test(timeout=15000)
public void testGracefulFailoverFailBecomingStandby() throws Exception {
try {
cluster.start();
cluster.waitForActiveLockHolder(0);
// Ask for failover when old node fails to transition to standby.
// This should trigger fencing, since the cedeActive() command
// still works, but leaves the breadcrumb in place.
cluster.setFailToBecomeStandby(0, true);
cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
// Check that the old node was fenced
assertEquals(1, cluster.getService(0).fenceCount);
} finally {
cluster.stop();
}
}
@Test(timeout=15000)
public void testGracefulFailoverFailBecomingStandbyAndFailFence()
throws Exception {
try {
cluster.start();
cluster.waitForActiveLockHolder(0);
// Ask for failover when old node fails to transition to standby.
// This should trigger fencing, since the cedeActive() command
// still works, but leaves the breadcrumb in place.
cluster.setFailToBecomeStandby(0, true);
cluster.setFailToFence(0, true);
try {
cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();
fail("Failover should have failed when old node wont fence");
} catch (ServiceFailedException sfe) {
GenericTestUtils.assertExceptionContains(
"Unable to fence " + cluster.getService(0), sfe);
}
} finally {
cluster.stop();
}
}
/**
* Test which exercises all of the inputs into ZKFC. This is particularly
* useful for running under jcarder to check for lock order violations.
*/
@Test(timeout=30000)
public void testOneOfEverything() throws Exception {
try {
cluster.start();
// Failover by session expiration
LOG.info("====== Failing over by session expiration");
cluster.expireAndVerifyFailover(0, 1);
cluster.expireAndVerifyFailover(1, 0);
// Restart ZK
LOG.info("====== Restarting server");
stopServer();
waitForServerDown(hostPort, CONNECTION_TIMEOUT);
startServer();
waitForServerUp(hostPort, CONNECTION_TIMEOUT);
// Failover by bad health
cluster.setHealthy(0, false);
cluster.waitForHAState(0, HAServiceState.INITIALIZING);
cluster.waitForHAState(1, HAServiceState.ACTIVE);
cluster.setHealthy(1, true);
cluster.setHealthy(0, false);
cluster.waitForHAState(1, HAServiceState.ACTIVE);
cluster.waitForHAState(0, HAServiceState.INITIALIZING);
cluster.setHealthy(0, true);
cluster.waitForHealthState(0, State.SERVICE_HEALTHY);
// Graceful failovers
cluster.getZkfc(1).gracefulFailoverToYou();
cluster.getZkfc(0).gracefulFailoverToYou();
} finally {
cluster.stop();
}
}
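/**
* Run a DummyZKFC bound to the given target service with the given
* command-line arguments, returning its exit code.
*/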
private int runFC(DummyHAService target, String ... args) throws Exception {
DummyZKFC zkfc = new DummyZKFC(conf, target);
return zkfc.run(args);
}
}
| 21,149 | 33.446254 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import static org.junit.Assert.*;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
public class TestHAAdmin {
private static final Log LOG = LogFactory.getLog(TestHAAdmin.class);
private HAAdmin tool;
private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
private ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
private String errOutput;
private String output;
@Before
public void setup() throws IOException {
tool = new HAAdmin() {
@Override
protected HAServiceTarget resolveTarget(String target) {
return new DummyHAService(HAServiceState.STANDBY,
new InetSocketAddress("dummy", 12345));
}
};
tool.setConf(new Configuration());
tool.errOut = new PrintStream(errOutBytes);
tool.out = new PrintStream(outBytes);
}
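/**
* Assert that the given string appeared in either the captured stderr or
* stdout of the most recent tool invocation.
*/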
private void assertOutputContains(String string) {
if (!errOutput.contains(string) && !output.contains(string)) {
fail("Expected output to contain '" + string +
"' but err_output was:\n" + errOutput +
"\n and output was: \n" + output);
}
}
@Test
public void testAdminUsage() throws Exception {
assertEquals(-1, runTool());
assertOutputContains("Usage:");
assertOutputContains("-transitionToActive");
assertEquals(-1, runTool("badCommand"));
assertOutputContains("Bad command 'badCommand'");
assertEquals(-1, runTool("-badCommand"));
assertOutputContains("badCommand: Unknown");
// valid command but not enough arguments
assertEquals(-1, runTool("-transitionToActive"));
assertOutputContains("transitionToActive: incorrect number of arguments");
assertEquals(-1, runTool("-transitionToActive", "x", "y"));
assertOutputContains("transitionToActive: incorrect number of arguments");
assertEquals(-1, runTool("-failover"));
assertOutputContains("failover: incorrect arguments");
assertOutputContains("failover: incorrect arguments");
assertEquals(-1, runTool("-failover", "foo:1234"));
assertOutputContains("failover: incorrect arguments");
}
@Test
public void testHelp() throws Exception {
assertEquals(0, runTool("-help"));
assertEquals(0, runTool("-help", "transitionToActive"));
assertOutputContains("Transitions the service into Active");
}
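/**
* Run the HAAdmin tool with the given arguments, capturing its stdout and
* stderr for later assertions, and return the exit code.
*/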
private int runTool(String ... args) throws Exception {
errOutBytes.reset();
outBytes.reset();
LOG.info("Running: HAAdmin " + Joiner.on(" ").join(args));
int ret = tool.run(args);
errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
output = new String(outBytes.toByteArray(), Charsets.UTF_8);
LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output);
return ret;
}
}
| 3,995 | 35 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.net.Socket;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.util.Time;
import org.apache.zookeeper.PortAssignment;
import org.apache.zookeeper.TestableZooKeeper;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.KeeperState;
import org.apache.zookeeper.ZKTestCase;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.server.ServerCnxnFactory;
import org.apache.zookeeper.server.ServerCnxnFactoryAccessor;
import org.apache.zookeeper.server.ZKDatabase;
import org.apache.zookeeper.server.ZooKeeperServer;
import org.apache.zookeeper.server.persistence.FileTxnLog;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
/**
* Copy-paste of ClientBase from ZooKeeper, but without any of the
* JMXEnv verification. There seems to be a bug (ZOOKEEPER-1438)
* which causes spurious failures in the JMXEnv verification when
* we run these tests with the upstream ClientBase.
*/
public abstract class ClientBaseWithFixes extends ZKTestCase {
protected static final Logger LOG = LoggerFactory.getLogger(ClientBaseWithFixes.class);
public static int CONNECTION_TIMEOUT = 30000;
static final File BASETEST =
new File(System.getProperty("test.build.data", "build"));
protected final String hostPort = initHostPort();
protected int maxCnxns = 0;
protected ServerCnxnFactory serverFactory = null;
protected File tmpDir = null;
long initialFdCount;
/**
* In general don't use this. Only use in the special case that you
* want to ignore results (for whatever reason) in your test. Don't
* use empty watchers in real code!
*/
protected class NullWatcher implements Watcher {
@Override
public void process(WatchedEvent event) { /* nada */ }
}
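/**
* Watcher that records the connection state and lets callers block until
* the client has connected or disconnected.
*/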
protected static class CountdownWatcher implements Watcher {
// XXX this doesn't need to be volatile! (Should probably be final)
volatile CountDownLatch clientConnected;
volatile boolean connected;
protected ZooKeeper client;
public void initializeWatchedClient(ZooKeeper zk) {
if (client != null) {
throw new RuntimeException("Watched Client was already set");
}
client = zk;
}
public CountdownWatcher() {
reset();
}
synchronized public void reset() {
clientConnected = new CountDownLatch(1);
connected = false;
}
@Override
synchronized public void process(WatchedEvent event) {
if (event.getState() == KeeperState.SyncConnected ||
event.getState() == KeeperState.ConnectedReadOnly) {
connected = true;
notifyAll();
clientConnected.countDown();
} else {
connected = false;
notifyAll();
}
}
synchronized boolean isConnected() {
return connected;
}
@VisibleForTesting
public synchronized void waitForConnected(long timeout)
throws InterruptedException, TimeoutException {
long expire = Time.now() + timeout;
long left = timeout;
while(!connected && left > 0) {
wait(left);
left = expire - Time.now();
}
if (!connected) {
throw new TimeoutException("Did not connect");
}
}
@VisibleForTesting
public synchronized void waitForDisconnected(long timeout)
throws InterruptedException, TimeoutException {
long expire = Time.now() + timeout;
long left = timeout;
while(connected && left > 0) {
wait(left);
left = expire - Time.now();
}
if (connected) {
throw new TimeoutException("Did not disconnect");
}
}
}
protected TestableZooKeeper createClient()
throws IOException, InterruptedException
{
return createClient(hostPort);
}
protected TestableZooKeeper createClient(String hp)
throws IOException, InterruptedException
{
CountdownWatcher watcher = new CountdownWatcher();
return createClient(watcher, hp);
}
private LinkedList<ZooKeeper> allClients;
private boolean allClientsSetup = false;
private RandomAccessFile portNumLockFile;
private File portNumFile;
protected TestableZooKeeper createClient(CountdownWatcher watcher, String hp)
throws IOException, InterruptedException
{
return createClient(watcher, hp, CONNECTION_TIMEOUT);
}
protected TestableZooKeeper createClient(CountdownWatcher watcher,
String hp, int timeout)
throws IOException, InterruptedException
{
watcher.reset();
TestableZooKeeper zk = new TestableZooKeeper(hp, timeout, watcher);
if (!watcher.clientConnected.await(timeout, TimeUnit.MILLISECONDS))
{
Assert.fail("Unable to connect to server");
}
synchronized(this) {
if (!allClientsSetup) {
LOG.error("allClients never setup");
Assert.fail("allClients never setup");
}
if (allClients != null) {
allClients.add(zk);
} else {
// test done - close the zk, not needed
zk.close();
}
}
watcher.initializeWatchedClient(zk);
return zk;
}
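/**
* Simple value class pairing a host name with a port number.
*/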
public static class HostPort {
String host;
int port;
public HostPort(String host, int port) {
this.host = host;
this.port = port;
}
}
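/**
* Parse a comma-separated list of host:port pairs, e.g.
* "127.0.0.1:2181,127.0.0.1:2182", into HostPort objects.
*/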
public static List<HostPort> parseHostPortList(String hplist) {
ArrayList<HostPort> alist = new ArrayList<HostPort>();
for (String hp: hplist.split(",")) {
int idx = hp.lastIndexOf(':');
String host = hp.substring(0, idx);
int port;
try {
port = Integer.parseInt(hp.substring(idx + 1));
} catch(RuntimeException e) {
throw new RuntimeException("Problem parsing " + hp + e.toString());
}
alist.add(new HostPort(host,port));
}
return alist;
}
/**
* Send the given four-letter-word command to a ZooKeeper server.
* @param host the destination host
* @param port the destination port
* @param cmd the four-letter-word command
* @return the server's response
* @throws IOException if the socket cannot be opened, written, or read
*/
public static String send4LetterWord(String host, int port, String cmd)
throws IOException
{
LOG.info("connecting to " + host + " " + port);
Socket sock = new Socket(host, port);
BufferedReader reader = null;
try {
OutputStream outstream = sock.getOutputStream();
outstream.write(cmd.getBytes());
outstream.flush();
// this replicates NC - close the output stream before reading
sock.shutdownOutput();
reader =
new BufferedReader(
new InputStreamReader(sock.getInputStream()));
StringBuilder sb = new StringBuilder();
String line;
while((line = reader.readLine()) != null) {
sb.append(line + "\n");
}
return sb.toString();
} finally {
sock.close();
if (reader != null) {
reader.close();
}
}
}
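/**
* Repeatedly send the "stat" four-letter-word to the first host:port in
* the list until the server reports a version (and is not read-only) or
* the timeout expires.
* @return true if the server came up within the timeout
*/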
public static boolean waitForServerUp(String hp, long timeout) {
long start = Time.now();
while (true) {
try {
// if there are multiple hostports, just take the first one
HostPort hpobj = parseHostPortList(hp).get(0);
String result = send4LetterWord(hpobj.host, hpobj.port, "stat");
if (result.startsWith("Zookeeper version:") &&
!result.contains("READ-ONLY")) {
return true;
}
} catch (IOException e) {
// ignore as this is expected
LOG.info("server " + hp + " not up " + e);
}
if (Time.now() > start + timeout) {
break;
}
try {
Thread.sleep(250);
} catch (InterruptedException e) {
// ignore
}
}
return false;
}
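/**
* Repeatedly send the "stat" command until it fails with an IOException
* (meaning the server is down) or the timeout expires.
* @return true if the server went down within the timeout
*/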
public static boolean waitForServerDown(String hp, long timeout) {
long start = Time.now();
while (true) {
try {
HostPort hpobj = parseHostPortList(hp).get(0);
send4LetterWord(hpobj.host, hpobj.port, "stat");
} catch (IOException e) {
return true;
}
if (Time.now() > start + timeout) {
break;
}
try {
Thread.sleep(250);
} catch (InterruptedException e) {
// ignore
}
}
return false;
}
public static File createTmpDir() throws IOException {
return createTmpDir(BASETEST);
}
static File createTmpDir(File parentDir) throws IOException {
File tmpFile = File.createTempFile("test", ".junit", parentDir);
// don't delete tmpFile - this ensures we don't attempt to create
// a tmpDir with a duplicate name
File tmpDir = new File(tmpFile + ".dir");
Assert.assertFalse(tmpDir.exists()); // never true if tmpFile does its job
Assert.assertTrue(tmpDir.mkdirs());
return tmpDir;
}
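/**
* Extract the port number from a host:port connect string, tolerating an
* optional /chroot suffix after the port.
*/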
private static int getPort(String hostPort) {
String[] split = hostPort.split(":");
String portstr = split[split.length-1];
String[] pc = portstr.split("/");
if (pc.length > 1) {
portstr = pc[0];
}
return Integer.parseInt(portstr);
}
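/**
* Start a standalone ZooKeeperServer on the port embedded in hostPort,
* creating a new connection factory if one was not supplied, and wait for
* the server to come up.
*/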
static ServerCnxnFactory createNewServerInstance(File dataDir,
ServerCnxnFactory factory, String hostPort, int maxCnxns)
throws IOException, InterruptedException
{
ZooKeeperServer zks = new ZooKeeperServer(dataDir, dataDir, 3000);
final int PORT = getPort(hostPort);
if (factory == null) {
factory = ServerCnxnFactory.createFactory(PORT, maxCnxns);
}
factory.startup(zks);
Assert.assertTrue("waiting for server up",
ClientBaseWithFixes.waitForServerUp("127.0.0.1:" + PORT,
CONNECTION_TIMEOUT));
return factory;
}
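/**
* Shut down the server behind the given connection factory, close its ZK
* database, and wait for the port to stop answering.
*/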
static void shutdownServerInstance(ServerCnxnFactory factory,
String hostPort)
{
if (factory != null) {
ZKDatabase zkDb;
{
ZooKeeperServer zs = getServer(factory);
zkDb = zs.getZKDatabase();
}
factory.shutdown();
try {
zkDb.close();
} catch (IOException ie) {
LOG.warn("Error closing logs ", ie);
}
final int PORT = getPort(hostPort);
Assert.assertTrue("waiting for server down",
ClientBaseWithFixes.waitForServerDown("127.0.0.1:" + PORT,
CONNECTION_TIMEOUT));
}
}
/**
* Test specific setup
*/
public static void setupTestEnv() {
// During the tests we run with 100K prealloc in the logs.
// On Windows systems a prealloc of 64M was seen to take ~15 seconds,
// resulting in test failure (client timeout on first session).
// Set both the system property and the static value directly in order
// to handle static init/gc issues.
System.setProperty("zookeeper.preAllocSize", "100");
FileTxnLog.setPreallocSize(100 * 1024);
}
protected void setUpAll() throws Exception {
allClients = new LinkedList<ZooKeeper>();
allClientsSetup = true;
}
@Before
public void setUp() throws Exception {
BASETEST.mkdirs();
setupTestEnv();
setUpAll();
tmpDir = createTmpDir(BASETEST);
startServer();
LOG.info("Client test setup finished");
}
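/**
* Pick a unique port for this test and guard it with a file lock under
* BASETEST so that concurrently running tests cannot choose the same
* port. The lock file is released and deleted in tearDown().
*/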
private String initHostPort() {
BASETEST.mkdirs();
int port;
for (;;) {
port = PortAssignment.unique();
FileLock lock = null;
portNumLockFile = null;
try {
try {
portNumFile = new File(BASETEST, port + ".lock");
portNumLockFile = new RandomAccessFile(portNumFile, "rw");
try {
lock = portNumLockFile.getChannel().tryLock();
} catch (OverlappingFileLockException e) {
continue;
}
} finally {
if (lock != null)
break;
if (portNumLockFile != null)
portNumLockFile.close();
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
return "127.0.0.1:" + port;
}
protected void startServer() throws Exception {
LOG.info("STARTING server");
serverFactory = createNewServerInstance(tmpDir, serverFactory, hostPort, maxCnxns);
}
protected void stopServer() throws Exception {
LOG.info("STOPPING server");
shutdownServerInstance(serverFactory, hostPort);
serverFactory = null;
}
protected static ZooKeeperServer getServer(ServerCnxnFactory fac) {
ZooKeeperServer zs = ServerCnxnFactoryAccessor.getZkServer(fac);
return zs;
}
protected void tearDownAll() throws Exception {
synchronized (this) {
if (allClients != null) for (ZooKeeper zk : allClients) {
try {
if (zk != null)
zk.close();
} catch (InterruptedException e) {
LOG.warn("ignoring interrupt", e);
}
}
allClients = null;
}
}
@After
public void tearDown() throws Exception {
LOG.info("tearDown starting");
tearDownAll();
stopServer();
portNumLockFile.close();
portNumFile.delete();
if (tmpDir != null) {
Assert.assertTrue("delete " + tmpDir.toString(), recursiveDelete(tmpDir));
}
// This has to be set to null when the same instance of this class is reused between test cases
serverFactory = null;
}
public static boolean recursiveDelete(File d) {
if (d.isDirectory()) {
File[] children = d.listFiles();
for (File f : children) {
Assert.assertTrue("delete " + f.toString(), recursiveDelete(f));
}
}
return d.delete();
}
}
| 16,574 | 31.886905 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.Code;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.Watcher.Event;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
import org.apache.zookeeper.ZooDefs.Ids;
import org.junit.Before;
import org.junit.Test;
import org.junit.Assert;
import org.mockito.Mockito;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException;
import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
import org.apache.hadoop.test.GenericTestUtils;
public class TestActiveStandbyElector {
private ZooKeeper mockZK;
private int count;
private ActiveStandbyElectorCallback mockApp;
private final byte[] data = new byte[8];
private ActiveStandbyElectorTester elector;
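/**
* Test subclass of ActiveStandbyElector that hands back the mock
* ZooKeeper instead of opening a real connection, and records requested
* sleeps instead of actually sleeping.
*/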
class ActiveStandbyElectorTester extends ActiveStandbyElector {
private int sleptFor = 0;
ActiveStandbyElectorTester(String hostPort, int timeout, String parent,
List<ACL> acl, ActiveStandbyElectorCallback app) throws IOException,
KeeperException {
super(hostPort, timeout, parent, acl, Collections
.<ZKAuthInfo> emptyList(), app,
CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT);
}
@Override
public ZooKeeper getNewZooKeeper() {
++count;
return mockZK;
}
@Override
protected void sleepFor(int ms) {
// don't sleep in unit tests! Instead, just record the amount of
// time slept
LOG.info("Would have slept for " + ms + "ms");
sleptFor += ms;
}
}
private static final String ZK_PARENT_NAME = "/parent/node";
private static final String ZK_LOCK_NAME = ZK_PARENT_NAME + "/" +
ActiveStandbyElector.LOCK_FILENAME;
private static final String ZK_BREADCRUMB_NAME = ZK_PARENT_NAME + "/" +
ActiveStandbyElector.BREADCRUMB_FILENAME;
@Before
public void init() throws IOException, KeeperException {
count = 0;
mockZK = Mockito.mock(ZooKeeper.class);
mockApp = Mockito.mock(ActiveStandbyElectorCallback.class);
elector = new ActiveStandbyElectorTester("hostPort", 1000, ZK_PARENT_NAME,
Ids.OPEN_ACL_UNSAFE, mockApp);
}
/**
* Set up the mock ZK to return no info for a prior active in ZK.
*/
private void mockNoPriorActive() throws Exception {
Mockito.doThrow(new KeeperException.NoNodeException()).when(mockZK)
.getData(Mockito.eq(ZK_BREADCRUMB_NAME), Mockito.anyBoolean(),
Mockito.<Stat>any());
}
/**
* Set up the mock to return info for some prior active node in ZK.
*/
private void mockPriorActive(byte[] data) throws Exception {
Mockito.doReturn(data).when(mockZK)
.getData(Mockito.eq(ZK_BREADCRUMB_NAME), Mockito.anyBoolean(),
Mockito.<Stat>any());
}
/**
* verify that joinElection checks for null data
*/
@Test(expected = HadoopIllegalArgumentException.class)
public void testJoinElectionException() {
elector.joinElection(null);
}
/**
* verify that joinElection tries to create ephemeral lock znode
*/
@Test
public void testJoinElection() {
elector.joinElection(data);
Mockito.verify(mockZK, Mockito.times(1)).create(ZK_LOCK_NAME, data,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
}
/**
* verify that a successful znode create result makes the elector active and
* starts monitoring
*/
@Test
public void testCreateNodeResultBecomeActive() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
Mockito.verify(mockApp, Mockito.times(1)).becomeActive();
verifyExistCall(1);
// monitor callback verifies the leader is the ephemeral owner of the lock
// but does not call becomeActive since it's already active
Stat stat = new Stat();
stat.setEphemeralOwner(1L);
Mockito.when(mockZK.getSessionId()).thenReturn(1L);
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
// should not call neutral mode/standby/active
Mockito.verify(mockApp, Mockito.times(0)).enterNeutralMode();
Mockito.verify(mockApp, Mockito.times(0)).becomeStandby();
Mockito.verify(mockApp, Mockito.times(1)).becomeActive();
// another joinElection not called.
Mockito.verify(mockZK, Mockito.times(1)).create(ZK_LOCK_NAME, data,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
// no new monitor called
verifyExistCall(1);
}
/**
* Verify that, when the callback fails to enter active state,
* the elector rejoins the election after sleeping for a short period.
*/
@Test
public void testFailToBecomeActive() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
Assert.assertEquals(0, elector.sleptFor);
Mockito.doThrow(new ServiceFailedException("failed to become active"))
.when(mockApp).becomeActive();
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
// Should have tried to become active
Mockito.verify(mockApp).becomeActive();
// should re-join
Mockito.verify(mockZK, Mockito.times(2)).create(ZK_LOCK_NAME, data,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
Assert.assertEquals(2, count);
Assert.assertTrue(elector.sleptFor > 0);
}
/**
* Verify that, when the callback fails to enter active state, after
* a ZK disconnect (i.e from the StatCallback), that the elector rejoins
* the election after sleeping for a short period.
*/
@Test
public void testFailToBecomeActiveAfterZKDisconnect() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
Assert.assertEquals(0, elector.sleptFor);
elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
Mockito.verify(mockZK, Mockito.times(2)).create(ZK_LOCK_NAME, data,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
verifyExistCall(1);
Stat stat = new Stat();
stat.setEphemeralOwner(1L);
Mockito.when(mockZK.getSessionId()).thenReturn(1L);
// Fake failure to become active from within the stat callback
Mockito.doThrow(new ServiceFailedException("fail to become active"))
.when(mockApp).becomeActive();
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
Mockito.verify(mockApp, Mockito.times(1)).becomeActive();
// should re-join
Mockito.verify(mockZK, Mockito.times(3)).create(ZK_LOCK_NAME, data,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
Assert.assertEquals(2, count);
Assert.assertTrue(elector.sleptFor > 0);
}
/**
* Verify that, if there is a record of a prior active node, the
* elector asks the application to fence it before becoming active.
*/
@Test
public void testFencesOldActive() throws Exception {
byte[] fakeOldActiveData = new byte[0];
mockPriorActive(fakeOldActiveData);
elector.joinElection(data);
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
// Application fences active.
Mockito.verify(mockApp, Mockito.times(1)).fenceOldActive(
fakeOldActiveData);
// Updates breadcrumb node to new data
Mockito.verify(mockZK, Mockito.times(1)).setData(
Mockito.eq(ZK_BREADCRUMB_NAME),
Mockito.eq(data),
Mockito.eq(0));
// Then it becomes active itself
Mockito.verify(mockApp, Mockito.times(1)).becomeActive();
}
@Test
public void testQuitElectionRemovesBreadcrumbNode() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
// Writes its own active info
Mockito.verify(mockZK, Mockito.times(1)).create(
Mockito.eq(ZK_BREADCRUMB_NAME), Mockito.eq(data),
Mockito.eq(Ids.OPEN_ACL_UNSAFE),
Mockito.eq(CreateMode.PERSISTENT));
mockPriorActive(data);
elector.quitElection(false);
// Deletes its own active data
Mockito.verify(mockZK, Mockito.times(1)).delete(
Mockito.eq(ZK_BREADCRUMB_NAME), Mockito.eq(0));
}
/**
* verify that a znode create that finds an existing node, with no retries,
* makes the elector standby and starts monitoring
*/
@Test
public void testCreateNodeResultBecomeStandby() {
elector.joinElection(data);
elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
Mockito.verify(mockApp, Mockito.times(1)).becomeStandby();
verifyExistCall(1);
}
/**
* verify that a znode create error results in a fatal error
*/
@Test
public void testCreateNodeResultError() {
elector.joinElection(data);
elector.processResult(Code.APIERROR.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
Mockito.verify(mockApp, Mockito.times(1)).notifyFatalError(
"Received create error from Zookeeper. code:APIERROR " +
"for path " + ZK_LOCK_NAME);
}
/**
* verify that after retrying network errors the elector verifies the master
* by session id and becomes active if they match. monitoring is started.
*/
@Test
public void testCreateNodeResultRetryBecomeActive() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
// 4 errors results in fatalError
Mockito
.verify(mockApp, Mockito.times(1))
.notifyFatalError(
"Received create error from Zookeeper. code:CONNECTIONLOSS " +
"for path " + ZK_LOCK_NAME + ". " +
"Not retrying further znode create connection errors.");
elector.joinElection(data);
// recreate connection via getNewZooKeeper
Assert.assertEquals(2, count);
elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
verifyExistCall(1);
Stat stat = new Stat();
stat.setEphemeralOwner(1L);
Mockito.when(mockZK.getSessionId()).thenReturn(1L);
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
Mockito.verify(mockApp, Mockito.times(1)).becomeActive();
verifyExistCall(1);
Mockito.verify(mockZK, Mockito.times(6)).create(ZK_LOCK_NAME, data,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
}
/**
* verify that after retrying network errors the elector verifies the active
* by session id and becomes standby if they don't match. monitoring is
* started.
*/
@Test
public void testCreateNodeResultRetryBecomeStandby() {
elector.joinElection(data);
elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
verifyExistCall(1);
Stat stat = new Stat();
stat.setEphemeralOwner(0);
Mockito.when(mockZK.getSessionId()).thenReturn(1L);
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
Mockito.verify(mockApp, Mockito.times(1)).becomeStandby();
verifyExistCall(1);
}
/**
* verify that if a znode create results in NODEEXISTS, and that znode is
* deleted before the exists() watch is set, then the NONODE result from
* exists() triggers an attempt to re-create the znode and become active
*/
@Test
public void testCreateNodeResultRetryNoNode() {
elector.joinElection(data);
elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
verifyExistCall(1);
elector.processResult(Code.NONODE.intValue(), ZK_LOCK_NAME, mockZK,
(Stat) null);
Mockito.verify(mockApp, Mockito.times(1)).enterNeutralMode();
Mockito.verify(mockZK, Mockito.times(4)).create(ZK_LOCK_NAME, data,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
}
/**
* verify that more than 3 network error retries result in a fatal error
*/
@Test
public void testStatNodeRetry() {
elector.joinElection(data);
elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
(Stat) null);
elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
(Stat) null);
elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
(Stat) null);
elector.processResult(Code.CONNECTIONLOSS.intValue(), ZK_LOCK_NAME, mockZK,
(Stat) null);
Mockito
.verify(mockApp, Mockito.times(1))
.notifyFatalError(
"Received stat error from Zookeeper. code:CONNECTIONLOSS. "+
"Not retrying further znode monitoring connection errors.");
}
/**
* verify error in exists() callback results in fatal error
*/
@Test
public void testStatNodeError() {
elector.joinElection(data);
elector.processResult(Code.RUNTIMEINCONSISTENCY.intValue(), ZK_LOCK_NAME,
mockZK, (Stat) null);
Mockito.verify(mockApp, Mockito.times(0)).enterNeutralMode();
Mockito.verify(mockApp, Mockito.times(1)).notifyFatalError(
"Received stat error from Zookeeper. code:RUNTIMEINCONSISTENCY");
}
/**
* verify behavior of watcher.process callback with non-node event
*/
@Test
public void testProcessCallbackEventNone() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
WatchedEvent mockEvent = Mockito.mock(WatchedEvent.class);
Mockito.when(mockEvent.getType()).thenReturn(Event.EventType.None);
// first SyncConnected should not do anything
Mockito.when(mockEvent.getState()).thenReturn(
Event.KeeperState.SyncConnected);
elector.processWatchEvent(mockZK, mockEvent);
Mockito.verify(mockZK, Mockito.times(0)).exists(Mockito.anyString(),
Mockito.anyBoolean(), Mockito.<AsyncCallback.StatCallback> anyObject(),
Mockito.<Object> anyObject());
// disconnection should enter safe mode
Mockito.when(mockEvent.getState()).thenReturn(
Event.KeeperState.Disconnected);
elector.processWatchEvent(mockZK, mockEvent);
Mockito.verify(mockApp, Mockito.times(1)).enterNeutralMode();
// re-connection should monitor master status
Mockito.when(mockEvent.getState()).thenReturn(
Event.KeeperState.SyncConnected);
elector.processWatchEvent(mockZK, mockEvent);
verifyExistCall(1);
Assert.assertTrue(elector.isMonitorLockNodePending());
elector.processResult(Code.SESSIONEXPIRED.intValue(), ZK_LOCK_NAME,
mockZK, new Stat());
Assert.assertFalse(elector.isMonitorLockNodePending());
// session expired should enter safe mode and initiate re-election
// re-election checked via checking re-creation of new zookeeper and
// call to create lock znode
Mockito.when(mockEvent.getState()).thenReturn(Event.KeeperState.Expired);
elector.processWatchEvent(mockZK, mockEvent);
// already in safe mode above. should not enter safe mode again
Mockito.verify(mockApp, Mockito.times(1)).enterNeutralMode();
// called getNewZooKeeper to create new session. first call was in
// constructor
Assert.assertEquals(2, count);
// once in initial joinElection and one now
Mockito.verify(mockZK, Mockito.times(2)).create(ZK_LOCK_NAME, data,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
// create znode success. become master and monitor
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
Mockito.verify(mockApp, Mockito.times(1)).becomeActive();
verifyExistCall(2);
// error event results in fatal error
Mockito.when(mockEvent.getState()).thenReturn(Event.KeeperState.AuthFailed);
elector.processWatchEvent(mockZK, mockEvent);
Mockito.verify(mockApp, Mockito.times(1)).notifyFatalError(
"Unexpected Zookeeper watch event state: AuthFailed");
// only 1 state change callback is called at a time
Mockito.verify(mockApp, Mockito.times(1)).enterNeutralMode();
}
/**
* verify behavior of watcher.process with node event
*/
@Test
public void testProcessCallbackEventNode() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
// make the object go into the monitoring state
elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
Mockito.verify(mockApp, Mockito.times(1)).becomeStandby();
verifyExistCall(1);
Assert.assertTrue(elector.isMonitorLockNodePending());
Stat stat = new Stat();
stat.setEphemeralOwner(0L);
Mockito.when(mockZK.getSessionId()).thenReturn(1L);
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
Assert.assertFalse(elector.isMonitorLockNodePending());
WatchedEvent mockEvent = Mockito.mock(WatchedEvent.class);
Mockito.when(mockEvent.getPath()).thenReturn(ZK_LOCK_NAME);
// monitoring should be setup again after event is received
Mockito.when(mockEvent.getType()).thenReturn(
Event.EventType.NodeDataChanged);
elector.processWatchEvent(mockZK, mockEvent);
verifyExistCall(2);
Assert.assertTrue(elector.isMonitorLockNodePending());
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
Assert.assertFalse(elector.isMonitorLockNodePending());
// monitoring should be setup again after event is received
Mockito.when(mockEvent.getType()).thenReturn(
Event.EventType.NodeChildrenChanged);
elector.processWatchEvent(mockZK, mockEvent);
verifyExistCall(3);
Assert.assertTrue(elector.isMonitorLockNodePending());
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
Assert.assertFalse(elector.isMonitorLockNodePending());
// lock node deletion when in standby mode should create znode again
// successful znode creation enters active state and sets monitor
Mockito.when(mockEvent.getType()).thenReturn(Event.EventType.NodeDeleted);
elector.processWatchEvent(mockZK, mockEvent);
// enterNeutralMode not called when app is standby and leader is lost
Mockito.verify(mockApp, Mockito.times(0)).enterNeutralMode();
// once in initial joinElection() and one now
Mockito.verify(mockZK, Mockito.times(2)).create(ZK_LOCK_NAME, data,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
Mockito.verify(mockApp, Mockito.times(1)).becomeActive();
verifyExistCall(4);
Assert.assertTrue(elector.isMonitorLockNodePending());
stat.setEphemeralOwner(1L);
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
Assert.assertFalse(elector.isMonitorLockNodePending());
// lock node deletion in active mode should enter neutral mode and create
// znode again successful znode creation enters active state and sets
// monitor
Mockito.when(mockEvent.getType()).thenReturn(Event.EventType.NodeDeleted);
elector.processWatchEvent(mockZK, mockEvent);
Mockito.verify(mockApp, Mockito.times(1)).enterNeutralMode();
// another joinElection called
Mockito.verify(mockZK, Mockito.times(3)).create(ZK_LOCK_NAME, data,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
Mockito.verify(mockApp, Mockito.times(2)).becomeActive();
verifyExistCall(5);
Assert.assertTrue(elector.isMonitorLockNodePending());
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
Assert.assertFalse(elector.isMonitorLockNodePending());
// bad path name results in fatal error
Mockito.when(mockEvent.getPath()).thenReturn(null);
elector.processWatchEvent(mockZK, mockEvent);
Mockito.verify(mockApp, Mockito.times(1)).notifyFatalError(
"Unexpected watch error from Zookeeper");
// fatal error means no new connection other than one from constructor
Assert.assertEquals(1, count);
// no new watches after fatal error
verifyExistCall(5);
}
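/**
* Verify that the elector registered the exists() watch on the lock znode
* exactly the given number of times.
*/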
private void verifyExistCall(int times) {
Mockito.verify(mockZK, Mockito.times(times)).exists(
Mockito.eq(ZK_LOCK_NAME), Mockito.<Watcher>any(),
Mockito.same(elector),
Mockito.same(mockZK));
}
/**
* verify becomeStandby is not called if already in standby
*/
@Test
public void testSuccessiveStandbyCalls() {
elector.joinElection(data);
// make the object go into the monitoring standby state
elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
Mockito.verify(mockApp, Mockito.times(1)).becomeStandby();
verifyExistCall(1);
Assert.assertTrue(elector.isMonitorLockNodePending());
Stat stat = new Stat();
stat.setEphemeralOwner(0L);
Mockito.when(mockZK.getSessionId()).thenReturn(1L);
elector.processResult(Code.OK.intValue(), ZK_LOCK_NAME, mockZK, stat);
Assert.assertFalse(elector.isMonitorLockNodePending());
WatchedEvent mockEvent = Mockito.mock(WatchedEvent.class);
Mockito.when(mockEvent.getPath()).thenReturn(ZK_LOCK_NAME);
// notify node deletion
// monitoring should be setup again after event is received
Mockito.when(mockEvent.getType()).thenReturn(Event.EventType.NodeDeleted);
elector.processWatchEvent(mockZK, mockEvent);
// is standby. no need to notify anything now
Mockito.verify(mockApp, Mockito.times(0)).enterNeutralMode();
// another joinElection called.
Mockito.verify(mockZK, Mockito.times(2)).create(ZK_LOCK_NAME, data,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
// lost election
elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
// still standby. so no need to notify again
Mockito.verify(mockApp, Mockito.times(1)).becomeStandby();
// monitor is set again
verifyExistCall(2);
}
/**
* verify quit election terminates connection and there are no new watches.
* next call to joinElection creates new connection and performs election
*/
@Test
public void testQuitElection() throws Exception {
elector.joinElection(data);
Mockito.verify(mockZK, Mockito.times(0)).close();
elector.quitElection(true);
Mockito.verify(mockZK, Mockito.times(1)).close();
// no watches added
verifyExistCall(0);
byte[] data = new byte[8];
elector.joinElection(data);
// getNewZooKeeper called 2 times. once in constructor and once now
Assert.assertEquals(2, count);
elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK,
ZK_LOCK_NAME);
Mockito.verify(mockApp, Mockito.times(1)).becomeStandby();
verifyExistCall(1);
}
/**
* verify that receiveActiveData gives data when active exists, tells that
* active does not exist and reports error in getting active information
*
* @throws IOException
* @throws InterruptedException
* @throws KeeperException
* @throws ActiveNotFoundException
*/
@Test
public void testGetActiveData() throws ActiveNotFoundException,
KeeperException, InterruptedException, IOException {
// get valid active data
byte[] data = new byte[8];
Mockito.when(
mockZK.getData(Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false),
Mockito.<Stat> anyObject())).thenReturn(data);
Assert.assertEquals(data, elector.getActiveData());
Mockito.verify(mockZK, Mockito.times(1)).getData(
Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false),
Mockito.<Stat> anyObject());
// active does not exist
Mockito.when(
mockZK.getData(Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false),
Mockito.<Stat> anyObject())).thenThrow(
new KeeperException.NoNodeException());
try {
elector.getActiveData();
Assert.fail("ActiveNotFoundException expected");
} catch(ActiveNotFoundException e) {
Mockito.verify(mockZK, Mockito.times(2)).getData(
Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false),
Mockito.<Stat> anyObject());
}
// error getting active data rethrows keeperexception
try {
Mockito.when(
mockZK.getData(Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false),
Mockito.<Stat> anyObject())).thenThrow(
new KeeperException.AuthFailedException());
elector.getActiveData();
Assert.fail("KeeperException.AuthFailedException expected");
} catch(KeeperException.AuthFailedException ke) {
Mockito.verify(mockZK, Mockito.times(3)).getData(
Mockito.eq(ZK_LOCK_NAME), Mockito.eq(false),
Mockito.<Stat> anyObject());
}
}
/**
* Test that ensureBaseNode() recursively creates the specified dir
*/
@Test
public void testEnsureBaseNode() throws Exception {
elector.ensureParentZNode();
StringBuilder prefix = new StringBuilder();
for (String part : ZK_PARENT_NAME.split("/")) {
if (part.isEmpty()) continue;
prefix.append("/").append(part);
if (!"/".equals(prefix.toString())) {
Mockito.verify(mockZK).create(
Mockito.eq(prefix.toString()), Mockito.<byte[]>any(),
Mockito.eq(Ids.OPEN_ACL_UNSAFE), Mockito.eq(CreateMode.PERSISTENT));
}
}
}
/**
* Test for a bug encountered during development of HADOOP-8163:
* ensureBaseNode() should throw an exception if it has to retry
* more than 3 times to create any part of the path.
*/
@Test
public void testEnsureBaseNodeFails() throws Exception {
Mockito.doThrow(new KeeperException.ConnectionLossException())
.when(mockZK).create(
Mockito.eq(ZK_PARENT_NAME), Mockito.<byte[]>any(),
Mockito.eq(Ids.OPEN_ACL_UNSAFE), Mockito.eq(CreateMode.PERSISTENT));
try {
elector.ensureParentZNode();
Assert.fail("Did not throw!");
} catch (IOException ioe) {
if (!(ioe.getCause() instanceof KeeperException.ConnectionLossException)) {
throw ioe;
}
}
// Should have tried three times
Mockito.verify(mockZK, Mockito.times(3)).create(
Mockito.eq(ZK_PARENT_NAME), Mockito.<byte[]>any(),
Mockito.eq(Ids.OPEN_ACL_UNSAFE), Mockito.eq(CreateMode.PERSISTENT));
}
/**
* verify the zookeeper connection establishment
*/
@Test
public void testWithoutZKServer() throws Exception {
try {
new ActiveStandbyElector("127.0.0.1", 2000, ZK_PARENT_NAME,
Ids.OPEN_ACL_UNSAFE, Collections.<ZKAuthInfo> emptyList(), mockApp,
CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT);
Assert.fail("Did not throw zookeeper connection loss exceptions!");
} catch (KeeperException ke) {
GenericTestUtils.assertExceptionContains( "ConnectionLoss", ke);
}
}
/**
* joinElection(..) should happen only after SERVICE_HEALTHY.
*/
@Test
public void testBecomeActiveBeforeServiceHealthy() throws Exception {
mockNoPriorActive();
WatchedEvent mockEvent = Mockito.mock(WatchedEvent.class);
Mockito.when(mockEvent.getType()).thenReturn(Event.EventType.None);
// Session expiry should enter safe mode. But the first time, before
// SERVICE_HEALTHY (i.e. before appData is set), it should not enter
// the election.
Mockito.when(mockEvent.getState()).thenReturn(Event.KeeperState.Expired);
elector.processWatchEvent(mockZK, mockEvent);
// joinElection should not be called.
Mockito.verify(mockZK, Mockito.times(0)).create(ZK_LOCK_NAME, null,
Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
}
}
| 29,681 | 37.200772 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.Collections;
import java.util.UUID;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
import org.apache.hadoop.ha.ActiveStandbyElector.State;
import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
import org.apache.log4j.Level;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.server.ZooKeeperServer;
import org.junit.Test;
import org.mockito.AdditionalMatchers;
import org.mockito.Mockito;
import com.google.common.primitives.Ints;
/**
* Test for {@link ActiveStandbyElector} using real zookeeper.
*/
public class TestActiveStandbyElectorRealZK extends ClientBaseWithFixes {
static final int NUM_ELECTORS = 2;
static {
((Log4JLogger)ActiveStandbyElector.LOG).getLogger().setLevel(
Level.ALL);
}
static final String PARENT_DIR = "/" + UUID.randomUUID();
ActiveStandbyElector[] electors = new ActiveStandbyElector[NUM_ELECTORS];
private byte[][] appDatas = new byte[NUM_ELECTORS][];
private ActiveStandbyElectorCallback[] cbs =
new ActiveStandbyElectorCallback[NUM_ELECTORS];
private ZooKeeperServer zkServer;
@Override
public void setUp() throws Exception {
super.setUp();
zkServer = getServer(serverFactory);
for (int i = 0; i < NUM_ELECTORS; i++) {
cbs[i] = Mockito.mock(ActiveStandbyElectorCallback.class);
appDatas[i] = Ints.toByteArray(i);
electors[i] = new ActiveStandbyElector(hostPort, 5000, PARENT_DIR,
Ids.OPEN_ACL_UNSAFE, Collections.<ZKAuthInfo> emptyList(), cbs[i],
CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT);
}
}
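/**
* Assert that no elector callback saw a fatal error, then reset all the
* mock callbacks for the next phase of the test.
*/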
private void checkFatalsAndReset() throws Exception {
for (int i = 0; i < NUM_ELECTORS; i++) {
Mockito.verify(cbs[i], Mockito.never()).notifyFatalError(
Mockito.anyString());
Mockito.reset(cbs[i]);
}
}
/**
* The test creates 2 electors which try to become active using a real
* zookeeper server. It verifies that 1 becomes active and 1 becomes standby.
* The active elector then quits the election and the test verifies that the
* standby now becomes active.
*/
@Test(timeout=20000)
public void testActiveStandbyTransition() throws Exception {
LOG.info("starting test with parentDir:" + PARENT_DIR);
assertFalse(electors[0].parentZNodeExists());
electors[0].ensureParentZNode();
assertTrue(electors[0].parentZNodeExists());
// First elector joins election, becomes active.
electors[0].joinElection(appDatas[0]);
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
zkServer, PARENT_DIR, appDatas[0]);
Mockito.verify(cbs[0], Mockito.timeout(1000)).becomeActive();
checkFatalsAndReset();
// Second elector joins election, becomes standby.
electors[1].joinElection(appDatas[1]);
Mockito.verify(cbs[1], Mockito.timeout(1000)).becomeStandby();
checkFatalsAndReset();
// First elector quits, second one should become active
electors[0].quitElection(true);
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
zkServer, PARENT_DIR, appDatas[1]);
Mockito.verify(cbs[1], Mockito.timeout(1000)).becomeActive();
checkFatalsAndReset();
// First one rejoins, becomes standby, second one stays active
electors[0].joinElection(appDatas[0]);
Mockito.verify(cbs[0], Mockito.timeout(1000)).becomeStandby();
checkFatalsAndReset();
// Second one expires, first one becomes active
electors[1].preventSessionReestablishmentForTests();
try {
zkServer.closeSession(electors[1].getZKSessionIdForTests());
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
zkServer, PARENT_DIR, appDatas[0]);
Mockito.verify(cbs[1], Mockito.timeout(1000)).enterNeutralMode();
Mockito.verify(cbs[0], Mockito.timeout(1000)).fenceOldActive(
AdditionalMatchers.aryEq(appDatas[1]));
Mockito.verify(cbs[0], Mockito.timeout(1000)).becomeActive();
} finally {
electors[1].allowSessionReestablishmentForTests();
}
// Second one eventually reconnects and becomes standby
Mockito.verify(cbs[1], Mockito.timeout(5000)).becomeStandby();
checkFatalsAndReset();
// First one expires, second one should become active
electors[0].preventSessionReestablishmentForTests();
try {
zkServer.closeSession(electors[0].getZKSessionIdForTests());
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
zkServer, PARENT_DIR, appDatas[1]);
Mockito.verify(cbs[0], Mockito.timeout(1000)).enterNeutralMode();
Mockito.verify(cbs[1], Mockito.timeout(1000)).fenceOldActive(
AdditionalMatchers.aryEq(appDatas[0]));
Mockito.verify(cbs[1], Mockito.timeout(1000)).becomeActive();
} finally {
electors[0].allowSessionReestablishmentForTests();
}
checkFatalsAndReset();
}
@Test(timeout=15000)
public void testHandleSessionExpiration() throws Exception {
ActiveStandbyElectorCallback cb = cbs[0];
byte[] appData = appDatas[0];
ActiveStandbyElector elector = electors[0];
// Let the first elector become active
elector.ensureParentZNode();
elector.joinElection(appData);
ZooKeeperServer zks = getServer(serverFactory);
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
zks, PARENT_DIR, appData);
Mockito.verify(cb, Mockito.timeout(1000)).becomeActive();
checkFatalsAndReset();
LOG.info("========================== Expiring session");
zks.closeSession(elector.getZKSessionIdForTests());
// Should enter neutral mode when disconnected
Mockito.verify(cb, Mockito.timeout(1000)).enterNeutralMode();
// Should re-join the election and regain active
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
zks, PARENT_DIR, appData);
Mockito.verify(cb, Mockito.timeout(1000)).becomeActive();
checkFatalsAndReset();
LOG.info("========================== Quitting election");
elector.quitElection(false);
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
zks, PARENT_DIR, null);
// Double check that we don't accidentally re-join the election
// due to receiving the "expired" event.
Thread.sleep(1000);
Mockito.verify(cb, Mockito.never()).becomeActive();
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
zks, PARENT_DIR, null);
checkFatalsAndReset();
}
@Test(timeout=15000)
public void testHandleSessionExpirationOfStandby() throws Exception {
// Let elector 0 be active
electors[0].ensureParentZNode();
electors[0].joinElection(appDatas[0]);
ZooKeeperServer zks = getServer(serverFactory);
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
zks, PARENT_DIR, appDatas[0]);
Mockito.verify(cbs[0], Mockito.timeout(1000)).becomeActive();
checkFatalsAndReset();
// Let elector 1 be standby
electors[1].joinElection(appDatas[1]);
ActiveStandbyElectorTestUtil.waitForElectorState(null, electors[1],
State.STANDBY);
LOG.info("========================== Expiring standby's session");
zks.closeSession(electors[1].getZKSessionIdForTests());
// Should enter neutral mode when disconnected
Mockito.verify(cbs[1], Mockito.timeout(1000)).enterNeutralMode();
// Should re-join the election and go back to STANDBY
ActiveStandbyElectorTestUtil.waitForElectorState(null, electors[1],
State.STANDBY);
checkFatalsAndReset();
LOG.info("========================== Quitting election");
electors[1].quitElection(false);
// Double check that we don't accidentally re-join the election due to
// receiving the "expired" event, by quitting elector 0 and ensuring
// elector 1 doesn't become active.
electors[0].quitElection(false);
Thread.sleep(1000);
Mockito.verify(cbs[1], Mockito.never()).becomeActive();
ActiveStandbyElectorTestUtil.waitForActiveLockData(null,
zks, PARENT_DIR, null);
checkFatalsAndReset();
}
@Test(timeout=15000)
public void testDontJoinElectionOnDisconnectAndReconnect() throws Exception {
electors[0].ensureParentZNode();
stopServer();
ActiveStandbyElectorTestUtil.waitForElectorState(
null, electors[0], State.NEUTRAL);
startServer();
waitForServerUp(hostPort, CONNECTION_TIMEOUT);
// Have to sleep to allow time for the clients to reconnect.
Thread.sleep(2000);
Mockito.verify(cbs[0], Mockito.never()).becomeActive();
Mockito.verify(cbs[1], Mockito.never()).becomeActive();
checkFatalsAndReset();
}
}
| 9,686 | 36.257692 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHtmlQuoting.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import static org.junit.Assert.*;
import javax.servlet.http.HttpServletRequest;
import org.junit.Test;
import org.mockito.Mockito;
public class TestHtmlQuoting {
@Test public void testNeedsQuoting() throws Exception {
assertTrue(HtmlQuoting.needsQuoting("abcde>"));
assertTrue(HtmlQuoting.needsQuoting("<abcde"));
assertTrue(HtmlQuoting.needsQuoting("abc'de"));
assertTrue(HtmlQuoting.needsQuoting("abcde\""));
assertTrue(HtmlQuoting.needsQuoting("&"));
assertFalse(HtmlQuoting.needsQuoting(""));
assertFalse(HtmlQuoting.needsQuoting("ab\ncdef"));
assertFalse(HtmlQuoting.needsQuoting(null));
}
@Test public void testQuoting() throws Exception {
assertEquals("ab<cd", HtmlQuoting.quoteHtmlChars("ab<cd"));
assertEquals("ab>", HtmlQuoting.quoteHtmlChars("ab>"));
assertEquals("&&&", HtmlQuoting.quoteHtmlChars("&&&"));
assertEquals(" '\n", HtmlQuoting.quoteHtmlChars(" '\n"));
assertEquals(""", HtmlQuoting.quoteHtmlChars("\""));
assertEquals(null, HtmlQuoting.quoteHtmlChars(null));
}
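/**
* Assert that quoting and then unquoting the given string returns the
* original string unchanged.
*/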
private void runRoundTrip(String str) throws Exception {
assertEquals(str,
HtmlQuoting.unquoteHtmlChars(HtmlQuoting.quoteHtmlChars(str)));
}
@Test public void testRoundtrip() throws Exception {
runRoundTrip("");
runRoundTrip("<>&'\"");
runRoundTrip("ab>cd<ef&ghi'\"");
runRoundTrip("A string\n with no quotable chars in it!");
runRoundTrip(null);
StringBuilder buffer = new StringBuilder();
for(char ch=0; ch < 127; ++ch) {
buffer.append(ch);
}
runRoundTrip(buffer.toString());
}
@Test
public void testRequestQuoting() throws Exception {
HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
HttpServer2.QuotingInputFilter.RequestQuoter quoter =
new HttpServer2.QuotingInputFilter.RequestQuoter(mockReq);
Mockito.doReturn("a<b").when(mockReq).getParameter("x");
assertEquals("Test simple param quoting",
"a<b", quoter.getParameter("x"));
Mockito.doReturn(null).when(mockReq).getParameter("x");
assertEquals("Test that missing parameters dont cause NPE",
null, quoter.getParameter("x"));
Mockito.doReturn(new String[]{"a<b", "b"}).when(mockReq).getParameterValues("x");
assertArrayEquals("Test escaping of an array",
new String[]{"a<b", "b"}, quoter.getParameterValues("x"));
Mockito.doReturn(null).when(mockReq).getParameterValues("x");
assertArrayEquals("Test that missing parameters dont cause NPE for array",
null, quoter.getParameterValues("x"));
}
}
| 3,474 | 37.186813 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLifecycle.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import org.apache.log4j.Logger;
import org.junit.Test;
public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
/**
* Check that a server is alive by probing the {@link HttpServer2#isAlive()} method
* and the text of its toString() description
* @param server server
*/
private void assertAlive(HttpServer2 server) {
assertTrue("Server is not alive", server.isAlive());
assertToStringContains(server, HttpServer2.STATE_DESCRIPTION_ALIVE);
}
private void assertNotLive(HttpServer2 server) {
assertTrue("Server should not be live", !server.isAlive());
assertToStringContains(server, HttpServer2.STATE_DESCRIPTION_NOT_LIVE);
}
  /**
   * Test that a freshly created server is not alive until started
   *
   * @throws Throwable on failure
   */
@Test public void testCreatedServerIsNotAlive() throws Throwable {
HttpServer2 server = createTestServer();
assertNotLive(server);
}
@Test public void testStopUnstartedServer() throws Throwable {
HttpServer2 server = createTestServer();
stop(server);
}
/**
* Test that the server is alive once started
*
* @throws Throwable on failure
*/
@Test
public void testStartedServerIsAlive() throws Throwable {
HttpServer2 server = null;
server = createTestServer();
assertNotLive(server);
server.start();
assertAlive(server);
stop(server);
}
  /**
   * Test that the server starts and stops cleanly with request logging enabled
   *
   * @throws Throwable on failure
   */
@Test
public void testStartedServerWithRequestLog() throws Throwable {
HttpRequestLogAppender requestLogAppender = new HttpRequestLogAppender();
requestLogAppender.setName("httprequestlog");
requestLogAppender.setFilename(System.getProperty("test.build.data", "/tmp/")
+ "jetty-name-yyyy_mm_dd.log");
Logger.getLogger(HttpServer2.class.getName() + ".test").addAppender(requestLogAppender);
HttpServer2 server = null;
server = createTestServer();
assertNotLive(server);
server.start();
assertAlive(server);
stop(server);
Logger.getLogger(HttpServer2.class.getName() + ".test").removeAppender(requestLogAppender);
}
/**
* Assert that the result of {@link HttpServer2#toString()} contains the specific text
* @param server server to examine
* @param text text to search for
*/
private void assertToStringContains(HttpServer2 server, String text) {
String description = server.toString();
assertTrue("Did not find \"" + text + "\" in \"" + description + "\"",
description.contains(text));
}
/**
* Test that the server is not alive once stopped
*
* @throws Throwable on failure
*/
@Test public void testStoppedServerIsNotAlive() throws Throwable {
HttpServer2 server = createAndStartTestServer();
assertAlive(server);
stop(server);
assertNotLive(server);
}
  /**
   * Test that stopping an already-stopped server is allowed and leaves it
   * not alive
   *
   * @throws Throwable on failure
   */
@Test public void testStoppingTwiceServerIsAllowed() throws Throwable {
HttpServer2 server = createAndStartTestServer();
assertAlive(server);
stop(server);
assertNotLive(server);
stop(server);
assertNotLive(server);
}
  /**
   * Test that webapp context attributes are cleared once the server is stopped
   *
   * @throws Throwable
   *           on failure
   */
  @Test
  public void testWebAppContextAfterServerStop() throws Throwable {
HttpServer2 server = null;
String key = "test.attribute.key";
String value = "test.attribute.value";
server = createTestServer();
assertNotLive(server);
server.start();
server.setAttribute(key, value);
assertAlive(server);
assertEquals(value, server.getAttribute(key));
stop(server);
assertNull("Server context should have cleared", server.getAttribute(key));
}
}
| 4,670 | 30.348993 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpCookieFlag.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.http;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import javax.net.ssl.HttpsURLConnection;
import javax.servlet.*;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
import java.security.GeneralSecurityException;
import java.net.HttpCookie;
import java.util.List;
public class TestHttpCookieFlag {
private static final String BASEDIR = System.getProperty("test.build.dir",
"target/test-dir") + "/" + TestHttpCookieFlag.class.getSimpleName();
private static String keystoresDir;
private static String sslConfDir;
private static SSLFactory clientSslFactory;
private static HttpServer2 server;
public static class DummyAuthenticationFilter implements Filter {
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain) throws IOException,
ServletException {
HttpServletResponse resp = (HttpServletResponse) response;
boolean isHttps = "https".equals(request.getScheme());
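      // Issue the auth cookie as HttpOnly; the Secure flag is added only
      // when the request arrived over HTTPS (both asserted in the tests).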
AuthenticationFilter.createAuthCookie(resp, "token", null, null, -1,
true, isHttps);
chain.doFilter(request, resp);
}
@Override
public void destroy() {
}
}
public static class DummyFilterInitializer extends FilterInitializer {
@Override
public void initFilter(FilterContainer container, Configuration conf) {
container.addFilter("DummyAuth", DummyAuthenticationFilter.class
.getName(), null);
}
}
@BeforeClass
public static void setUp() throws Exception {
Configuration conf = new Configuration();
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
DummyFilterInitializer.class.getName());
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
Configuration sslConf = new Configuration(false);
sslConf.addResource("ssl-server.xml");
sslConf.addResource("ssl-client.xml");
clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
clientSslFactory.init();
server = new HttpServer2.Builder()
.setName("test")
.addEndpoint(new URI("http://localhost"))
.addEndpoint(new URI("https://localhost"))
.setConf(conf)
.keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
.keyStore(sslConf.get("ssl.server.keystore.location"),
sslConf.get("ssl.server.keystore.password"),
sslConf.get("ssl.server.keystore.type", "jks"))
.trustStore(sslConf.get("ssl.server.truststore.location"),
sslConf.get("ssl.server.truststore.password"),
sslConf.get("ssl.server.truststore.type", "jks")).build();
server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
server.start();
}
@Test
public void testHttpCookie() throws IOException {
URL base = new URL("http://" + NetUtils.getHostPortString(server
.getConnectorAddress(0)));
HttpURLConnection conn = (HttpURLConnection) new URL(base,
"/echo").openConnection();
String header = conn.getHeaderField("Set-Cookie");
List<HttpCookie> cookies = HttpCookie.parse(header);
Assert.assertTrue(!cookies.isEmpty());
Assert.assertTrue(header.contains("; HttpOnly"));
Assert.assertTrue("token".equals(cookies.get(0).getValue()));
}
@Test
public void testHttpsCookie() throws IOException, GeneralSecurityException {
URL base = new URL("https://" + NetUtils.getHostPortString(server
.getConnectorAddress(1)));
HttpsURLConnection conn = (HttpsURLConnection) new URL(base,
"/echo").openConnection();
conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());
String header = conn.getHeaderField("Set-Cookie");
List<HttpCookie> cookies = HttpCookie.parse(header);
Assert.assertTrue(!cookies.isEmpty());
Assert.assertTrue(header.contains("; HttpOnly"));
Assert.assertTrue(cookies.get(0).getSecure());
Assert.assertTrue("token".equals(cookies.get(0).getValue()));
}
@AfterClass
public static void cleanup() throws Exception {
server.stop();
FileUtil.fullyDelete(new File(BASEDIR));
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
clientSslFactory.destroy();
}
}
| 5,898 | 37.555556 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpRequestLogAppender.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class TestHttpRequestLogAppender {
@Test
public void testParameterPropagation() {
HttpRequestLogAppender requestLogAppender = new HttpRequestLogAppender();
requestLogAppender.setFilename("jetty-namenode-yyyy_mm_dd.log");
requestLogAppender.setRetainDays(17);
assertEquals("Filename mismatch", "jetty-namenode-yyyy_mm_dd.log",
requestLogAppender.getFilename());
assertEquals("Retain days mismatch", 17,
requestLogAppender.getRetainDays());
}
}
| 1,404 | 35.973684 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2.Builder;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.net.MalformedURLException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* This is a base class for functional tests of the {@link HttpServer2}.
* The methods are static for other classes to import statically.
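 *
 * A typical usage sketch (illustrative only, composed from the helpers
 * defined below):
 * <pre>
 *   HttpServer2 server = createAndStartTestServer();
 *   try {
 *     String body = readOutput(new URL(getServerURL(server), "/echo?a=b"));
 *   } finally {
 *     stop(server);
 *   }
 * </pre>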
*/
public class HttpServerFunctionalTest extends Assert {
@SuppressWarnings("serial")
public static class LongHeaderServlet extends HttpServlet {
@Override
public void doGet(HttpServletRequest request,
HttpServletResponse response
) throws ServletException, IOException {
Assert.assertEquals(63 * 1024, request.getHeader("longheader").length());
response.setStatus(HttpServletResponse.SC_OK);
}
}
/** JVM property for the webapp test dir : {@value} */
public static final String TEST_BUILD_WEBAPPS = "test.build.webapps";
/** expected location of the test.build.webapps dir: {@value} */
private static final String BUILD_WEBAPPS_DIR = "build/test/webapps";
/** name of the test webapp: {@value} */
private static final String TEST = "test";
protected static URL baseUrl;
/**
* Create but do not start the test webapp server. The test webapp dir is
* prepared/checked in advance.
*
* @return the server instance
*
* @throws IOException if a problem occurs
* @throws AssertionError if a condition was not met
*/
public static HttpServer2 createTestServer() throws IOException {
prepareTestWebapp();
return createServer(TEST);
}
/**
* Create but do not start the test webapp server. The test webapp dir is
* prepared/checked in advance.
* @param conf the server configuration to use
* @return the server instance
*
* @throws IOException if a problem occurs
* @throws AssertionError if a condition was not met
*/
public static HttpServer2 createTestServer(Configuration conf)
throws IOException {
prepareTestWebapp();
return createServer(TEST, conf);
}
public static HttpServer2 createTestServer(Configuration conf, AccessControlList adminsAcl)
throws IOException {
prepareTestWebapp();
return createServer(TEST, conf, adminsAcl);
}
/**
* Create but do not start the test webapp server. The test webapp dir is
* prepared/checked in advance.
* @param conf the server configuration to use
* @return the server instance
*
* @throws IOException if a problem occurs
* @throws AssertionError if a condition was not met
*/
public static HttpServer2 createTestServer(Configuration conf,
String[] pathSpecs) throws IOException {
prepareTestWebapp();
return createServer(TEST, conf, pathSpecs);
}
/**
   * Prepare the test webapp by creating the directory from the test
   * properties; fail if the directory cannot be created.
* @throws AssertionError if a condition was not met
*/
protected static void prepareTestWebapp() {
String webapps = System.getProperty(TEST_BUILD_WEBAPPS, BUILD_WEBAPPS_DIR);
File testWebappDir = new File(webapps +
File.separatorChar + TEST);
try {
if (!testWebappDir.exists()) {
if (!testWebappDir.mkdirs()) {
fail("Test webapp dir " + testWebappDir.getCanonicalPath()
+ " can not be created");
}
}
    } catch (IOException e) {
      fail("Could not resolve test webapp dir: " + e);
    }
}
/**
* Create an HttpServer instance on the given address for the given webapp
* @param host to bind
* @param port to bind
* @return the server
* @throws IOException if it could not be created
*/
public static HttpServer2 createServer(String host, int port)
throws IOException {
prepareTestWebapp();
return new HttpServer2.Builder().setName(TEST)
.addEndpoint(URI.create("http://" + host + ":" + port))
.setFindPort(true).build();
}
/**
* Create an HttpServer instance for the given webapp
* @param webapp the webapp to work with
* @return the server
* @throws IOException if it could not be created
*/
public static HttpServer2 createServer(String webapp) throws IOException {
return localServerBuilder(webapp).setFindPort(true).build();
}
/**
* Create an HttpServer instance for the given webapp
* @param webapp the webapp to work with
* @param conf the configuration to use for the server
* @return the server
* @throws IOException if it could not be created
*/
public static HttpServer2 createServer(String webapp, Configuration conf)
throws IOException {
return localServerBuilder(webapp).setFindPort(true).setConf(conf).build();
}
public static HttpServer2 createServer(String webapp, Configuration conf, AccessControlList adminsAcl)
throws IOException {
return localServerBuilder(webapp).setFindPort(true).setConf(conf).setACL(adminsAcl).build();
}
private static Builder localServerBuilder(String webapp) {
return new HttpServer2.Builder().setName(webapp).addEndpoint(
URI.create("http://localhost:0"));
}
/**
* Create an HttpServer instance for the given webapp
* @param webapp the webapp to work with
* @param conf the configuration to use for the server
* @param pathSpecs the paths specifications the server will service
* @return the server
* @throws IOException if it could not be created
*/
public static HttpServer2 createServer(String webapp, Configuration conf,
String[] pathSpecs) throws IOException {
return localServerBuilder(webapp).setFindPort(true).setConf(conf).setPathSpec(pathSpecs).build();
}
/**
* Create and start a server with the test webapp
*
* @return the newly started server
*
* @throws IOException on any failure
* @throws AssertionError if a condition was not met
*/
public static HttpServer2 createAndStartTestServer() throws IOException {
HttpServer2 server = createTestServer();
server.start();
return server;
}
/**
* If the server is non null, stop it
* @param server to stop
* @throws Exception on any failure
*/
public static void stop(HttpServer2 server) throws Exception {
if (server != null) {
server.stop();
}
}
/**
* Pass in a server, return a URL bound to localhost and its port
* @param server server
   * @return a URL bound to the base of the server
* @throws MalformedURLException if the URL cannot be created.
*/
public static URL getServerURL(HttpServer2 server)
throws MalformedURLException {
assertNotNull("No server", server);
return new URL("http://"
+ NetUtils.getHostPortString(server.getConnectorAddress(0)));
}
/**
* Read in the content from a URL
* @param url URL To read
* @return the text from the output
* @throws IOException if something went wrong
*/
protected static String readOutput(URL url) throws IOException {
StringBuilder out = new StringBuilder();
InputStream in = url.openConnection().getInputStream();
byte[] buffer = new byte[64 * 1024];
int len = in.read(buffer);
while (len > 0) {
out.append(new String(buffer, 0, len));
len = in.read(buffer);
}
return out.toString();
}
/**
* Test that verifies headers can be up to 64K long.
* The test adds a 63K header leaving 1K for other headers.
* This is because the header buffer setting is for ALL headers,
* names and values included. */
protected void testLongHeader(HttpURLConnection conn) throws IOException {
StringBuilder sb = new StringBuilder();
for (int i = 0 ; i < 63 * 1024; i++) {
sb.append("a");
}
conn.setRequestProperty("longheader", sb.toString());
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
}
}
| 9,008 | 32.996226 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.http.HttpServer2.QuotingInputFilter.RequestQuoter;
import org.apache.hadoop.http.resource.JerseyResource;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
import org.mortbay.jetty.Connector;
import org.mortbay.util.ajax.JSON;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
public class TestHttpServer extends HttpServerFunctionalTest {
static final Log LOG = LogFactory.getLog(TestHttpServer.class);
private static HttpServer2 server;
private static final int MAX_THREADS = 10;
@SuppressWarnings("serial")
public static class EchoMapServlet extends HttpServlet {
@SuppressWarnings("unchecked")
@Override
public void doGet(HttpServletRequest request,
HttpServletResponse response
) throws ServletException, IOException {
PrintWriter out = response.getWriter();
Map<String, String[]> params = request.getParameterMap();
SortedSet<String> keys = new TreeSet<String>(params.keySet());
for(String key: keys) {
out.print(key);
out.print(':');
String[] values = params.get(key);
if (values.length > 0) {
out.print(values[0]);
for(int i=1; i < values.length; ++i) {
out.print(',');
out.print(values[i]);
}
}
out.print('\n');
}
out.close();
}
}
@SuppressWarnings("serial")
public static class EchoServlet extends HttpServlet {
@SuppressWarnings("unchecked")
@Override
public void doGet(HttpServletRequest request,
HttpServletResponse response
) throws ServletException, IOException {
PrintWriter out = response.getWriter();
SortedSet<String> sortedKeys = new TreeSet<String>();
Enumeration<String> keys = request.getParameterNames();
while(keys.hasMoreElements()) {
sortedKeys.add(keys.nextElement());
}
for(String key: sortedKeys) {
out.print(key);
out.print(':');
out.print(request.getParameter(key));
out.print('\n');
}
out.close();
}
}
@SuppressWarnings("serial")
public static class HtmlContentServlet extends HttpServlet {
@Override
public void doGet(HttpServletRequest request,
HttpServletResponse response
) throws ServletException, IOException {
response.setContentType("text/html");
PrintWriter out = response.getWriter();
out.print("hello world");
out.close();
}
}
@BeforeClass public static void setup() throws Exception {
Configuration conf = new Configuration();
conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
server = createTestServer(conf);
server.addServlet("echo", "/echo", EchoServlet.class);
server.addServlet("echomap", "/echomap", EchoMapServlet.class);
server.addServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class);
server.addServlet("longheader", "/longheader", LongHeaderServlet.class);
server.addJerseyResourcePackage(
JerseyResource.class.getPackage().getName(), "/jersey/*");
server.start();
baseUrl = getServerURL(server);
LOG.info("HTTP server started: "+ baseUrl);
}
@AfterClass public static void cleanup() throws Exception {
server.stop();
}
/** Test the maximum number of threads cannot be exceeded. */
@Test public void testMaxThreads() throws Exception {
int clientThreads = MAX_THREADS * 10;
Executor executor = Executors.newFixedThreadPool(clientThreads);
// Run many clients to make server reach its maximum number of threads
final CountDownLatch ready = new CountDownLatch(clientThreads);
final CountDownLatch start = new CountDownLatch(1);
for (int i = 0; i < clientThreads; i++) {
executor.execute(new Runnable() {
@Override
public void run() {
ready.countDown();
try {
start.await();
assertEquals("a:b\nc:d\n",
readOutput(new URL(baseUrl, "/echo?a=b&c=d")));
int serverThreads = server.webServer.getThreadPool().getThreads();
assertTrue("More threads are started than expected, Server Threads count: "
+ serverThreads, serverThreads <= MAX_THREADS);
System.out.println("Number of threads = " + serverThreads +
" which is less or equal than the max = " + MAX_THREADS);
} catch (Exception e) {
            // ignore; this test only exercises the server-side thread cap
}
}
});
}
// Start the client threads when they are all ready
ready.await();
start.countDown();
}
@Test public void testEcho() throws Exception {
assertEquals("a:b\nc:d\n",
readOutput(new URL(baseUrl, "/echo?a=b&c=d")));
assertEquals("a:b\nc<:d\ne:>\n",
readOutput(new URL(baseUrl, "/echo?a=b&c<=d&e=>")));
}
/** Test the echo map servlet that uses getParameterMap. */
@Test public void testEchoMap() throws Exception {
assertEquals("a:b\nc:d\n",
readOutput(new URL(baseUrl, "/echomap?a=b&c=d")));
assertEquals("a:b,>\nc<:d\n",
readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>")));
}
@Test public void testLongHeader() throws Exception {
URL url = new URL(baseUrl, "/longheader");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
testLongHeader(conn);
}
@Test public void testContentTypes() throws Exception {
// Static CSS files should have text/css
URL cssUrl = new URL(baseUrl, "/static/test.css");
HttpURLConnection conn = (HttpURLConnection)cssUrl.openConnection();
conn.connect();
assertEquals(200, conn.getResponseCode());
assertEquals("text/css", conn.getContentType());
// Servlets should have text/plain with proper encoding by default
URL servletUrl = new URL(baseUrl, "/echo?a=b");
conn = (HttpURLConnection)servletUrl.openConnection();
conn.connect();
assertEquals(200, conn.getResponseCode());
assertEquals("text/plain; charset=utf-8", conn.getContentType());
    // We should ignore parameters for mime types - i.e., a parameter
    // ending in .css should not change the mime type
servletUrl = new URL(baseUrl, "/echo?a=b.css");
conn = (HttpURLConnection)servletUrl.openConnection();
conn.connect();
assertEquals(200, conn.getResponseCode());
assertEquals("text/plain; charset=utf-8", conn.getContentType());
// Servlets that specify text/html should get that content type
servletUrl = new URL(baseUrl, "/htmlcontent");
conn = (HttpURLConnection)servletUrl.openConnection();
conn.connect();
assertEquals(200, conn.getResponseCode());
assertEquals("text/html; charset=utf-8", conn.getContentType());
}
/**
   * Dummy filter that mimics an authentication filter. Obtains user identity
* from the request parameter user.name. Wraps around the request so that
* request.getRemoteUser() returns the user identity.
*
*/
public static class DummyServletFilter implements Filter {
@Override
public void destroy() { }
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain filterChain) throws IOException, ServletException {
final String userName = request.getParameter("user.name");
ServletRequest requestModified =
new HttpServletRequestWrapper((HttpServletRequest) request) {
@Override
public String getRemoteUser() {
return userName;
}
};
filterChain.doFilter(requestModified, response);
}
@Override
public void init(FilterConfig arg0) throws ServletException { }
}
/**
   * FilterInitializer that initializes the DummyFilter.
*
*/
public static class DummyFilterInitializer extends FilterInitializer {
public DummyFilterInitializer() {
}
@Override
public void initFilter(FilterContainer container, Configuration conf) {
container.addFilter("DummyFilter", DummyServletFilter.class.getName(), null);
}
}
/**
* Access a URL and get the corresponding return Http status code. The URL
* will be accessed as the passed user, by sending user.name request
* parameter.
*
   * @param urlstring the URL to access
   * @param userName the user to access the URL as
   * @return the HTTP status code of the response
* @throws IOException
*/
static int getHttpStatusCode(String urlstring, String userName)
throws IOException {
URL url = new URL(urlstring + "?user.name=" + userName);
System.out.println("Accessing " + url + " as user " + userName);
HttpURLConnection connection = (HttpURLConnection)url.openConnection();
connection.connect();
return connection.getResponseCode();
}
/**
* Custom user->group mapping service.
*/
public static class MyGroupsProvider extends ShellBasedUnixGroupsMapping {
static Map<String, List<String>> mapping = new HashMap<String, List<String>>();
static void clearMapping() {
mapping.clear();
}
@Override
public List<String> getGroups(String user) throws IOException {
return mapping.get(user);
}
}
/**
* Verify the access for /logs, /stacks, /conf, /logLevel and /metrics
* servlets, when authentication filters are set, but authorization is not
* enabled.
* @throws Exception
*/
@Test
public void testDisabledAuthorizationOfDefaultServlets() throws Exception {
Configuration conf = new Configuration();
// Authorization is disabled by default
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
DummyFilterInitializer.class.getName());
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
MyGroupsProvider.class.getName());
Groups.getUserToGroupsMappingService(conf);
MyGroupsProvider.clearMapping();
MyGroupsProvider.mapping.put("userA", Arrays.asList("groupA"));
MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB"));
HttpServer2 myServer = new HttpServer2.Builder().setName("test")
.addEndpoint(new URI("http://localhost:0")).setFindPort(true).build();
myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf);
myServer.start();
String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
for (String servlet : new String[] { "conf", "logs", "stacks",
"logLevel", "metrics" }) {
for (String user : new String[] { "userA", "userB" }) {
assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL
+ servlet, user));
}
}
myServer.stop();
}
/**
* Verify the administrator access for /logs, /stacks, /conf, /logLevel and
* /metrics servlets.
*
* @throws Exception
*/
@Test
public void testAuthorizationOfDefaultServlets() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
true);
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
true);
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
DummyFilterInitializer.class.getName());
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
MyGroupsProvider.class.getName());
Groups.getUserToGroupsMappingService(conf);
MyGroupsProvider.clearMapping();
MyGroupsProvider.mapping.put("userA", Arrays.asList("groupA"));
MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB"));
MyGroupsProvider.mapping.put("userC", Arrays.asList("groupC"));
MyGroupsProvider.mapping.put("userD", Arrays.asList("groupD"));
MyGroupsProvider.mapping.put("userE", Arrays.asList("groupE"));
HttpServer2 myServer = new HttpServer2.Builder().setName("test")
.addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf)
.setACL(new AccessControlList("userA,userB groupC,groupD")).build();
myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf);
myServer.start();
String serverURL = "http://"
+ NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
for (String servlet : new String[] { "conf", "logs", "stacks",
"logLevel", "metrics" }) {
for (String user : new String[] { "userA", "userB", "userC", "userD" }) {
assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL
+ servlet, user));
}
assertEquals(HttpURLConnection.HTTP_FORBIDDEN, getHttpStatusCode(
serverURL + servlet, "userE"));
}
myServer.stop();
}
@Test
public void testRequestQuoterWithNull() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.doReturn(null).when(request).getParameterValues("dummy");
RequestQuoter requestQuoter = new RequestQuoter(request);
String[] parameterValues = requestQuoter.getParameterValues("dummy");
Assert.assertNull(
"It should return null " + "when there are no values for the parameter",
parameterValues);
}
@Test
public void testRequestQuoterWithNotNull() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
String[] values = new String[] { "abc", "def" };
Mockito.doReturn(values).when(request).getParameterValues("dummy");
RequestQuoter requestQuoter = new RequestQuoter(request);
String[] parameterValues = requestQuoter.getParameterValues("dummy");
Assert.assertTrue("It should return Parameter Values", Arrays.equals(
values, parameterValues));
}
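  /** Parse a JSON object string into a map using Jetty's JSON utility. */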
@SuppressWarnings("unchecked")
private static Map<String, Object> parse(String jsonString) {
return (Map<String, Object>)JSON.parse(jsonString);
}
@Test public void testJersey() throws Exception {
LOG.info("BEGIN testJersey()");
final String js = readOutput(new URL(baseUrl, "/jersey/foo?op=bar"));
final Map<String, Object> m = parse(js);
LOG.info("m=" + m);
assertEquals("foo", m.get(JerseyResource.PATH));
assertEquals("bar", m.get(JerseyResource.OP));
LOG.info("END testJersey()");
}
@Test
public void testHasAdministratorAccess() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false);
ServletContext context = Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(null);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRemoteUser()).thenReturn(null);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
//authorization OFF
Assert.assertTrue(HttpServer2.hasAdministratorAccess(context, request, response));
//authorization ON & user NULL
response = Mockito.mock(HttpServletResponse.class);
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
Assert.assertFalse(HttpServer2.hasAdministratorAccess(context, request, response));
Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN), Mockito.anyString());
//authorization ON & user NOT NULL & ACLs NULL
response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getRemoteUser()).thenReturn("foo");
Assert.assertTrue(HttpServer2.hasAdministratorAccess(context, request, response));
//authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs
response = Mockito.mock(HttpServletResponse.class);
AccessControlList acls = Mockito.mock(AccessControlList.class);
Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
Assert.assertFalse(HttpServer2.hasAdministratorAccess(context, request, response));
Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN), Mockito.anyString());
    //authorization ON & user NOT NULL & ACLs NOT NULL & user in ACLs
response = Mockito.mock(HttpServletResponse.class);
Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(true);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
Assert.assertTrue(HttpServer2.hasAdministratorAccess(context, request, response));
}
@Test
public void testRequiresAuthorizationAccess() throws Exception {
Configuration conf = new Configuration();
ServletContext context = Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
//requires admin access to instrumentation, FALSE by default
Assert.assertTrue(HttpServer2.isInstrumentationAccessAllowed(context, request, response));
//requires admin access to instrumentation, TRUE
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
AccessControlList acls = Mockito.mock(AccessControlList.class);
Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
Assert.assertFalse(HttpServer2.isInstrumentationAccessAllowed(context, request, response));
}
@Test public void testBindAddress() throws Exception {
checkBindAddress("localhost", 0, false).stop();
// hang onto this one for a bit more testing
HttpServer2 myServer = checkBindAddress("localhost", 0, false);
HttpServer2 myServer2 = null;
try {
int port = myServer.getConnectorAddress(0).getPort();
// it's already in use, true = expect a higher port
myServer2 = checkBindAddress("localhost", port, true);
// try to reuse the port
port = myServer2.getConnectorAddress(0).getPort();
myServer2.stop();
assertNull(myServer2.getConnectorAddress(0)); // not bound
myServer2.openListeners();
assertEquals(port, myServer2.getConnectorAddress(0).getPort()); // expect same port
} finally {
myServer.stop();
if (myServer2 != null) {
myServer2.stop();
}
}
}
private HttpServer2 checkBindAddress(String host, int port, boolean findPort)
throws Exception {
HttpServer2 server = createServer(host, port);
try {
// not bound, ephemeral should return requested port (0 for ephemeral)
List<?> listeners = (List<?>) Whitebox.getInternalState(server,
"listeners");
Connector listener = (Connector) listeners.get(0);
assertEquals(port, listener.getPort());
// verify hostname is what was given
server.openListeners();
assertEquals(host, server.getConnectorAddress(0).getHostName());
int boundPort = server.getConnectorAddress(0).getPort();
if (port == 0) {
assertTrue(boundPort != 0); // ephemeral should now return bound port
} else if (findPort) {
assertTrue(boundPort > port);
// allow a little wiggle room to prevent random test failures if
// some consecutive ports are already in use
assertTrue(boundPort - port < 8);
}
} catch (Exception e) {
server.stop();
throw e;
}
return server;
}
@Test
public void testNoCacheHeader() throws Exception {
URL url = new URL(baseUrl, "/echo?a=b&c=d");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
assertEquals("no-cache", conn.getHeaderField("Cache-Control"));
assertEquals("no-cache", conn.getHeaderField("Pragma"));
assertNotNull(conn.getHeaderField("Expires"));
assertNotNull(conn.getHeaderField("Date"));
assertEquals(conn.getHeaderField("Expires"), conn.getHeaderField("Date"));
}
}
| 22,442 | 39.076786 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;
import java.util.Set;
import java.util.TreeSet;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.junit.Test;
public class TestGlobalFilter extends HttpServerFunctionalTest {
static final Log LOG = LogFactory.getLog(HttpServer2.class);
static final Set<String> RECORDS = new TreeSet<String>();
  /** A very simple filter that records accessed URIs */
static public class RecordingFilter implements Filter {
private FilterConfig filterConfig = null;
@Override
public void init(FilterConfig filterConfig) {
this.filterConfig = filterConfig;
}
@Override
public void destroy() {
this.filterConfig = null;
}
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain) throws IOException, ServletException {
if (filterConfig == null)
return;
String uri = ((HttpServletRequest)request).getRequestURI();
LOG.info("filtering " + uri);
RECORDS.add(uri);
chain.doFilter(request, response);
}
/** Configuration for RecordingFilter */
static public class Initializer extends FilterInitializer {
public Initializer() {}
@Override
public void initFilter(FilterContainer container, Configuration conf) {
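        // A global filter applies to every request path; the test below
        // verifies it also sees /static and /logs resources.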
container.addGlobalFilter("recording", RecordingFilter.class.getName(), null);
}
}
}
  /** Access a URL, ignoring IOExceptions such as when the page does not exist */
static void access(String urlstring) throws IOException {
LOG.warn("access " + urlstring);
URL url = new URL(urlstring);
URLConnection connection = url.openConnection();
connection.connect();
try {
BufferedReader in = new BufferedReader(new InputStreamReader(
connection.getInputStream()));
try {
        for (; in.readLine() != null; ); // drain the response body
} finally {
in.close();
}
} catch(IOException ioe) {
LOG.warn("urlstring=" + urlstring, ioe);
}
}
@Test
public void testServletFilter() throws Exception {
Configuration conf = new Configuration();
    //start an http server with RecordingFilter
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
RecordingFilter.Initializer.class.getName());
HttpServer2 http = createTestServer(conf);
http.start();
final String fsckURL = "/fsck";
final String stacksURL = "/stacks";
final String ajspURL = "/a.jsp";
final String listPathsURL = "/listPaths";
final String dataURL = "/data";
final String streamFile = "/streamFile";
final String rootURL = "/";
final String allURL = "/*";
final String outURL = "/static/a.out";
final String logURL = "/logs/a.log";
final String[] urls = {fsckURL, stacksURL, ajspURL, listPathsURL,
dataURL, streamFile, rootURL, allURL, outURL, logURL};
//access the urls
final String prefix = "http://"
+ NetUtils.getHostPortString(http.getConnectorAddress(0));
try {
for(int i = 0; i < urls.length; i++) {
access(prefix + urls[i]);
}
} finally {
http.stop();
}
LOG.info("RECORDS = " + RECORDS);
//verify records
for(int i = 0; i < urls.length; i++) {
assertTrue(RECORDS.remove(urls[i]));
}
assertTrue(RECORDS.isEmpty());
}
}
| 4,674 | 30.587838 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.InputStream;
import java.net.URI;
import java.net.URL;
import javax.net.ssl.HttpsURLConnection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
 * This test case issues SSL certificates, configures the HttpServer to serve
 * HTTPS using the created certificates, and calls an echo servlet using the
 * corresponding HTTPS URL.
*/
public class TestSSLHttpServer extends HttpServerFunctionalTest {
private static final String BASEDIR = System.getProperty("test.build.dir",
"target/test-dir") + "/" + TestSSLHttpServer.class.getSimpleName();
private static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
private static Configuration conf;
private static HttpServer2 server;
private static String keystoresDir;
private static String sslConfDir;
private static SSLFactory clientSslFactory;
@BeforeClass
public static void setup() throws Exception {
conf = new Configuration();
conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
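    // Generate test keystores/truststores and the ssl-server/ssl-client
    // configs that the server builder and client SSLFactory load below.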
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
Configuration sslConf = new Configuration(false);
sslConf.addResource("ssl-server.xml");
sslConf.addResource("ssl-client.xml");
clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
clientSslFactory.init();
server = new HttpServer2.Builder()
.setName("test")
.addEndpoint(new URI("https://localhost"))
.setConf(conf)
.keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
.keyStore(sslConf.get("ssl.server.keystore.location"),
sslConf.get("ssl.server.keystore.password"),
sslConf.get("ssl.server.keystore.type", "jks"))
.trustStore(sslConf.get("ssl.server.truststore.location"),
sslConf.get("ssl.server.truststore.password"),
sslConf.get("ssl.server.truststore.type", "jks")).build();
server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
server.addServlet("longheader", "/longheader", LongHeaderServlet.class);
server.start();
baseUrl = new URL("https://"
+ NetUtils.getHostPortString(server.getConnectorAddress(0)));
LOG.info("HTTP server started: " + baseUrl);
}
@AfterClass
public static void cleanup() throws Exception {
server.stop();
FileUtil.fullyDelete(new File(BASEDIR));
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
clientSslFactory.destroy();
}
@Test
public void testEcho() throws Exception {
assertEquals("a:b\nc:d\n", readOut(new URL(baseUrl, "/echo?a=b&c=d")));
assertEquals("a:b\nc<:d\ne:>\n", readOut(new URL(baseUrl,
"/echo?a=b&c<=d&e=>")));
}
/**
* Test that verifies headers can be up to 64K long.
* The test adds a 63K header leaving 1K for other headers.
* This is because the header buffer setting is for ALL headers,
* names and values included. */
@Test
public void testLongHeader() throws Exception {
URL url = new URL(baseUrl, "/longheader");
HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());
testLongHeader(conn);
}
private static String readOut(URL url) throws Exception {
HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());
InputStream in = conn.getInputStream();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(in, out, 1024);
return out.toString();
}
}
| 5,077 | 37.469697 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestAuthenticationSessionCookie.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.http;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.junit.After;
import org.junit.Test;
import org.mortbay.log.Log;
import javax.servlet.*;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.net.HttpCookie;
import java.util.List;
public class TestAuthenticationSessionCookie {
private static final String BASEDIR = System.getProperty("test.build.dir",
"target/test-dir") + "/" + TestHttpCookieFlag.class.getSimpleName();
private static boolean isCookiePersistent;
private static final long TOKEN_VALIDITY_SEC = 1000;
private static long expires;
private static String keystoresDir;
private static String sslConfDir;
private static HttpServer2 server;
public static class DummyAuthenticationFilter implements Filter {
@Override
public void init(FilterConfig filterConfig) throws ServletException {
isCookiePersistent = false;
}
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain) throws IOException,
ServletException {
HttpServletResponse resp = (HttpServletResponse) response;
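      // With isCookiePersistent=false this yields a session cookie (no
      // Expires attribute); the Dummy2 variant flips both fields to get a
      // persistent one.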
AuthenticationFilter.createAuthCookie(resp, "token", null, null, expires,
isCookiePersistent, true);
chain.doFilter(request, resp);
}
@Override
public void destroy() {
}
}
public static class DummyFilterInitializer extends FilterInitializer {
@Override
public void initFilter(FilterContainer container, Configuration conf) {
container.addFilter("DummyAuth", DummyAuthenticationFilter.class
.getName(), null);
}
}
public static class Dummy2AuthenticationFilter
extends DummyAuthenticationFilter {
@Override
public void init(FilterConfig filterConfig) throws ServletException {
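      // Persistent-cookie variant: an absolute expiry plus the persistent
      // flag makes Set-Cookie carry an Expires attribute.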
isCookiePersistent = true;
expires = System.currentTimeMillis() + TOKEN_VALIDITY_SEC;
}
@Override
public void destroy() {
}
}
public static class Dummy2FilterInitializer extends FilterInitializer {
@Override
public void initFilter(FilterContainer container, Configuration conf) {
container.addFilter("Dummy2Auth", Dummy2AuthenticationFilter.class
.getName(), null);
}
}
public void startServer(boolean isTestSessionCookie) throws Exception {
Configuration conf = new Configuration();
if (isTestSessionCookie) {
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
DummyFilterInitializer.class.getName());
} else {
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
Dummy2FilterInitializer.class.getName());
}
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
keystoresDir = new File(BASEDIR).getAbsolutePath();
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
Configuration sslConf = new Configuration(false);
sslConf.addResource("ssl-server.xml");
sslConf.addResource("ssl-client.xml");
server = new HttpServer2.Builder()
.setName("test")
.addEndpoint(new URI("http://localhost"))
.addEndpoint(new URI("https://localhost"))
.setConf(conf)
.keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
.keyStore(sslConf.get("ssl.server.keystore.location"),
sslConf.get("ssl.server.keystore.password"),
sslConf.get("ssl.server.keystore.type", "jks"))
.trustStore(sslConf.get("ssl.server.truststore.location"),
sslConf.get("ssl.server.truststore.password"),
sslConf.get("ssl.server.truststore.type", "jks")).build();
server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
server.start();
}
@Test
public void testSessionCookie() throws IOException {
try {
startServer(true);
} catch (Exception e) {
      Assert.fail("Failed to start the test server: " + e);
}
URL base = new URL("http://" + NetUtils.getHostPortString(server
.getConnectorAddress(0)));
HttpURLConnection conn = (HttpURLConnection) new URL(base,
"/echo").openConnection();
String header = conn.getHeaderField("Set-Cookie");
List<HttpCookie> cookies = HttpCookie.parse(header);
Assert.assertTrue(!cookies.isEmpty());
Log.info(header);
Assert.assertFalse(header.contains("; Expires="));
Assert.assertTrue("token".equals(cookies.get(0).getValue()));
}
@Test
public void testPersistentCookie() throws IOException {
try {
startServer(false);
} catch (Exception e) {
      Assert.fail("Failed to start the test server: " + e);
}
URL base = new URL("http://" + NetUtils.getHostPortString(server
.getConnectorAddress(0)));
HttpURLConnection conn = (HttpURLConnection) new URL(base,
"/echo").openConnection();
String header = conn.getHeaderField("Set-Cookie");
List<HttpCookie> cookies = HttpCookie.parse(header);
Assert.assertTrue(!cookies.isEmpty());
Log.info(header);
Assert.assertTrue(header.contains("; Expires="));
Assert.assertTrue("token".equals(cookies.get(0).getValue()));
}
@After
public void cleanup() throws Exception {
server.stop();
FileUtil.fullyDelete(new File(BASEDIR));
KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
}
}
| 6,519 | 33.680851 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWebapps.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import org.junit.Test;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import java.io.FileNotFoundException;
/**
* Test webapp loading
*/
public class TestHttpServerWebapps extends HttpServerFunctionalTest {
private static final Log log = LogFactory.getLog(TestHttpServerWebapps.class);
/**
* Test that the test server is loadable on the classpath
* @throws Throwable if something went wrong
*/
@Test
public void testValidServerResource() throws Throwable {
HttpServer2 server = null;
try {
server = createServer("test");
} finally {
stop(server);
}
}
/**
* Test that an invalid webapp triggers an exception
* @throws Throwable if something went wrong
*/
@Test
public void testMissingServerResource() throws Throwable {
try {
HttpServer2 server = createServer("NoSuchWebapp");
//should not have got here.
//close the server
String serverDescription = server.toString();
stop(server);
fail("Expected an exception, got " + serverDescription);
} catch (FileNotFoundException expected) {
log.debug("Expected exception " + expected, expected);
}
}
}
| 2,048 | 30.045455 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpRequestLog.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import org.apache.log4j.Logger;
import org.junit.Test;
import org.mortbay.jetty.NCSARequestLog;
import org.mortbay.jetty.RequestLog;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
public class TestHttpRequestLog {
@Test
public void testAppenderUndefined() {
RequestLog requestLog = HttpRequestLog.getRequestLog("test");
assertNull("RequestLog should be null", requestLog);
}
@Test
public void testAppenderDefined() {
HttpRequestLogAppender requestLogAppender = new HttpRequestLogAppender();
requestLogAppender.setName("testrequestlog");
Logger.getLogger("http.requests.test").addAppender(requestLogAppender);
RequestLog requestLog = HttpRequestLog.getRequestLog("test");
Logger.getLogger("http.requests.test").removeAppender(requestLogAppender);
assertNotNull("RequestLog should not be null", requestLog);
assertEquals("Class mismatch", NCSARequestLog.class, requestLog.getClass());
}
}
| 1,866 | 37.895833 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;
import java.util.Set;
import java.util.TreeSet;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.junit.Test;
public class TestPathFilter extends HttpServerFunctionalTest {
static final Log LOG = LogFactory.getLog(HttpServer2.class);
static final Set<String> RECORDS = new TreeSet<String>();
  /** A very simple filter that records accessed URIs. */
static public class RecordingFilter implements Filter {
private FilterConfig filterConfig = null;
@Override
public void init(FilterConfig filterConfig) {
this.filterConfig = filterConfig;
}
@Override
public void destroy() {
this.filterConfig = null;
}
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain) throws IOException, ServletException {
if (filterConfig == null)
return;
String uri = ((HttpServletRequest)request).getRequestURI();
LOG.info("filtering " + uri);
RECORDS.add(uri);
chain.doFilter(request, response);
}
/** Configuration for RecordingFilter */
static public class Initializer extends FilterInitializer {
public Initializer() {}
@Override
public void initFilter(FilterContainer container, Configuration conf) {
container.addFilter("recording", RecordingFilter.class.getName(), null);
}
}
}
  /** Access a URL, ignoring IOExceptions such as the page not existing. */
static void access(String urlstring) throws IOException {
LOG.warn("access " + urlstring);
URL url = new URL(urlstring);
URLConnection connection = url.openConnection();
connection.connect();
try {
BufferedReader in = new BufferedReader(new InputStreamReader(
connection.getInputStream()));
try {
for(; in.readLine() != null; );
} finally {
in.close();
}
} catch(IOException ioe) {
LOG.warn("urlstring=" + urlstring, ioe);
}
}
@Test
public void testPathSpecFilters() throws Exception {
Configuration conf = new Configuration();
    // Start an HTTP server with RecordingFilter.
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
RecordingFilter.Initializer.class.getName());
String[] pathSpecs = { "/path", "/path/*" };
HttpServer2 http = createTestServer(conf, pathSpecs);
http.start();
final String baseURL = "/path";
final String baseSlashURL = "/path/";
final String addedURL = "/path/nodes";
final String addedSlashURL = "/path/nodes/";
final String longURL = "/path/nodes/foo/job";
final String rootURL = "/";
final String allURL = "/*";
final String[] filteredUrls = {baseURL, baseSlashURL, addedURL,
addedSlashURL, longURL};
final String[] notFilteredUrls = {rootURL, allURL};
    // Access the URLs and verify our path specs got added to the
    // filters.
final String prefix = "http://"
+ NetUtils.getHostPortString(http.getConnectorAddress(0));
try {
for(int i = 0; i < filteredUrls.length; i++) {
access(prefix + filteredUrls[i]);
}
for(int i = 0; i < notFilteredUrls.length; i++) {
access(prefix + notFilteredUrls[i]);
}
} finally {
http.stop();
}
LOG.info("RECORDS = " + RECORDS);
//verify records
for(int i = 0; i < filteredUrls.length; i++) {
assertTrue(RECORDS.remove(filteredUrls[i]));
}
assertTrue(RECORDS.isEmpty());
}
}
| 4,855 | 30.947368 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;
import java.util.Random;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
public class TestServletFilter extends HttpServerFunctionalTest {
static final Log LOG = LogFactory.getLog(HttpServer2.class);
static volatile String uri = null;
  /** A very simple filter that records the URI it filtered. */
static public class SimpleFilter implements Filter {
private FilterConfig filterConfig = null;
@Override
public void init(FilterConfig filterConfig) throws ServletException {
this.filterConfig = filterConfig;
}
@Override
public void destroy() {
this.filterConfig = null;
}
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain) throws IOException, ServletException {
if (filterConfig == null)
return;
uri = ((HttpServletRequest)request).getRequestURI();
LOG.info("filtering " + uri);
chain.doFilter(request, response);
}
/** Configuration for the filter */
static public class Initializer extends FilterInitializer {
public Initializer() {}
@Override
public void initFilter(FilterContainer container, Configuration conf) {
container.addFilter("simple", SimpleFilter.class.getName(), null);
}
}
}
  /** Access a URL, ignoring IOExceptions such as the page not existing. */
static void access(String urlstring) throws IOException {
LOG.warn("access " + urlstring);
URL url = new URL(urlstring);
URLConnection connection = url.openConnection();
connection.connect();
try {
BufferedReader in = new BufferedReader(new InputStreamReader(
connection.getInputStream()));
try {
for(; in.readLine() != null; );
} finally {
in.close();
}
} catch(IOException ioe) {
LOG.warn("urlstring=" + urlstring, ioe);
}
}
@Test
public void testServletFilter() throws Exception {
Configuration conf = new Configuration();
    // Start an HTTP server with SimpleFilter.
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
SimpleFilter.Initializer.class.getName());
HttpServer2 http = createTestServer(conf);
http.start();
final String fsckURL = "/fsck";
final String stacksURL = "/stacks";
final String ajspURL = "/a.jsp";
final String logURL = "/logs/a.log";
final String hadooplogoURL = "/static/hadoop-logo.jpg";
final String[] urls = {fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL};
final Random ran = new Random();
final int[] sequence = new int[50];
//generate a random sequence and update counts
for(int i = 0; i < sequence.length; i++) {
sequence[i] = ran.nextInt(urls.length);
}
    // Access the URLs in the generated sequence.
final String prefix = "http://"
+ NetUtils.getHostPortString(http.getConnectorAddress(0));
try {
for(int i = 0; i < sequence.length; i++) {
access(prefix + urls[sequence[i]]);
        // Make sure everything except fsck gets filtered.
if (sequence[i] == 0) {
assertEquals(null, uri);
} else {
assertEquals(urls[sequence[i]], uri);
uri = null;
}
}
} finally {
http.stop();
}
}
static public class ErrorFilter extends SimpleFilter {
@Override
public void init(FilterConfig arg0) throws ServletException {
throw new ServletException("Throwing the exception from Filter init");
}
/** Configuration for the filter */
static public class Initializer extends FilterInitializer {
public Initializer() {
}
public void initFilter(FilterContainer container, Configuration conf) {
container.addFilter("simple", ErrorFilter.class.getName(), null);
}
}
}
@Test
public void testServletFilterWhenInitThrowsException() throws Exception {
Configuration conf = new Configuration();
    // Start an HTTP server with ErrorFilter.
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
ErrorFilter.Initializer.class.getName());
HttpServer2 http = createTestServer(conf);
try {
http.start();
fail("expecting exception");
} catch (IOException e) {
assertTrue(e.getMessage().contains(
"Problem in starting http server. Server handlers failed"));
}
}
/**
* Similar to the above test case, except that it uses a different API to add
* the filter. Regression test for HADOOP-8786.
*/
@Test
public void testContextSpecificServletFilterWhenInitThrowsException()
throws Exception {
Configuration conf = new Configuration();
HttpServer2 http = createTestServer(conf);
HttpServer2.defineFilter(http.webAppContext,
"ErrorFilter", ErrorFilter.class.getName(),
null, null);
try {
http.start();
fail("expecting exception");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
"Unable to initialize WebAppContext", e);
}
}
}
| 6,465 | 31.009901 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/lib/TestStaticUserWebFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http.lib;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.http.lib.StaticUserWebFilter.StaticUserFilter;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
public class TestStaticUserWebFilter {
private FilterConfig mockConfig(String username) {
FilterConfig mock = Mockito.mock(FilterConfig.class);
Mockito.doReturn(username).when(mock).getInitParameter(
CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER);
return mock;
}
@Test
public void testFilter() throws Exception {
FilterConfig config = mockConfig("myuser");
StaticUserFilter suf = new StaticUserFilter();
suf.init(config);
ArgumentCaptor<HttpServletRequestWrapper> wrapperArg =
ArgumentCaptor.forClass(HttpServletRequestWrapper.class);
FilterChain chain = mock(FilterChain.class);
suf.doFilter(mock(HttpServletRequest.class), mock(ServletResponse.class),
chain);
Mockito.verify(chain).doFilter(wrapperArg.capture(), Mockito.<ServletResponse>anyObject());
HttpServletRequestWrapper wrapper = wrapperArg.getValue();
assertEquals("myuser", wrapper.getUserPrincipal().getName());
assertEquals("myuser", wrapper.getRemoteUser());
suf.destroy();
}
@Test
public void testOldStyleConfiguration() {
Configuration conf = new Configuration();
conf.set("dfs.web.ugi", "joe,group1,group2");
assertEquals("joe", StaticUserWebFilter.getUsernameFromConf(conf));
}
@Test
public void testConfiguration() {
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER, "joe");
assertEquals("joe", StaticUserWebFilter.getUsernameFromConf(conf));
}
}
| 2,914 | 34.54878 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http.resource;
import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.mortbay.util.ajax.JSON;
/**
 * A simple Jersey resource class used by TestHttpServer.
 * The servlet simply puts the path and the op parameter in a map
 * and returns it in JSON format in the response.
*/
@Path("")
public class JerseyResource {
static final Log LOG = LogFactory.getLog(JerseyResource.class);
public static final String PATH = "path";
public static final String OP = "op";
@GET
@Path("{" + PATH + ":.*}")
@Produces({MediaType.APPLICATION_JSON})
public Response get(
@PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path,
@QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op
) throws IOException {
LOG.info("get: " + PATH + "=" + path + ", " + OP + "=" + op);
final Map<String, Object> m = new TreeMap<String, Object>();
m.put(PATH, path);
m.put(OP, op);
final String js = JSON.toString(m);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
}
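/*
 * Illustrative exchange for this resource (hypothetical values, not captured
 * from a real run):
 *
 *   GET /foo/bar?op=LIST
 *   -> {"op":"LIST","path":"foo/bar"}
 */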
| 2,217 | 33.123077 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/HadoopIllegalArgumentException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Indicates that a method has been passed an illegal or invalid argument. This
 * exception is thrown instead of IllegalArgumentException to differentiate an
 * exception thrown by the Hadoop implementation from one thrown by the JDK.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class HadoopIllegalArgumentException extends IllegalArgumentException {
private static final long serialVersionUID = 1L;
/**
* Constructs exception with the specified detail message.
* @param message detailed message.
*/
public HadoopIllegalArgumentException(final String message) {
super(message);
}
}
| 1,581 | 37.585366 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.retry.Idempotent;
/**
 * Protocol implemented by the NameNode and the JobTracker that maps users to
* groups.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface GetUserMappingsProtocol {
/**
* Version 1: Initial version.
*/
public static final long versionID = 1L;
/**
* Get the groups which are mapped to the given user.
* @param user The user to get the groups for.
* @return The set of groups the user belongs to.
* @throws IOException
*/
@Idempotent
public String[] getGroupsForUser(String user) throws IOException;
}
| 1,633 | 33.041667 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.tools;
import org.apache.hadoop.classification.InterfaceAudience;
| 926 | 41.136364 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.util.ArrayList;
import java.util.LinkedList;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.WordUtils;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* This class implements a "table listing" with column headers.
*
* Example:
*
 * NAME   OWNER   GROUP   MODE       WEIGHT
 * pool1  andrew  andrew  rwxr-xr-x     100
 * pool2  andrew  andrew  rwxr-xr-x     100
 * pool3  andrew  andrew  rwxr-xr-x     100
*
*/
@InterfaceAudience.Private
public class TableListing {
public enum Justification {
LEFT,
RIGHT;
}
private static class Column {
private final ArrayList<String> rows;
private final Justification justification;
private final boolean wrap;
private int wrapWidth = Integer.MAX_VALUE;
private int maxWidth;
Column(String title, Justification justification, boolean wrap) {
this.rows = new ArrayList<String>();
this.justification = justification;
this.wrap = wrap;
this.maxWidth = 0;
addRow(title);
}
private void addRow(String val) {
if (val == null) {
val = "";
}
if ((val.length() + 1) > maxWidth) {
maxWidth = val.length() + 1;
}
// Ceiling at wrapWidth, because it'll get wrapped
if (maxWidth > wrapWidth) {
maxWidth = wrapWidth;
}
rows.add(val);
}
private int getMaxWidth() {
return maxWidth;
}
private void setWrapWidth(int width) {
wrapWidth = width;
// Ceiling the maxLength at wrapWidth
if (maxWidth > wrapWidth) {
maxWidth = wrapWidth;
}
// Else we need to traverse through and find the real maxWidth
else {
maxWidth = 0;
for (int i=0; i<rows.size(); i++) {
int length = rows.get(i).length();
if (length > maxWidth) {
maxWidth = length;
}
}
}
}
/**
* Return the ith row of the column as a set of wrapped strings, each at
* most wrapWidth in length.
*/
String[] getRow(int idx) {
String raw = rows.get(idx);
// Line-wrap if it's too long
String[] lines = new String[] {raw};
if (wrap) {
lines = WordUtils.wrap(lines[0], wrapWidth, "\n", true).split("\n");
}
for (int i=0; i<lines.length; i++) {
if (justification == Justification.LEFT) {
lines[i] = StringUtils.rightPad(lines[i], maxWidth);
} else if (justification == Justification.RIGHT) {
lines[i] = StringUtils.leftPad(lines[i], maxWidth);
}
}
return lines;
}
}
public static class Builder {
private final LinkedList<Column> columns = new LinkedList<Column>();
private boolean showHeader = true;
private int wrapWidth = Integer.MAX_VALUE;
/**
* Create a new Builder.
*/
public Builder() {
}
public Builder addField(String title) {
return addField(title, Justification.LEFT, false);
}
public Builder addField(String title, Justification justification) {
return addField(title, justification, false);
}
public Builder addField(String title, boolean wrap) {
return addField(title, Justification.LEFT, wrap);
}
/**
* Add a new field to the Table under construction.
*
* @param title Field title.
* @param justification Right or left justification. Defaults to left.
* @param wrap Width at which to auto-wrap the content of the cell.
* Defaults to Integer.MAX_VALUE.
* @return This Builder object
*/
public Builder addField(String title, Justification justification,
boolean wrap) {
columns.add(new Column(title, justification, wrap));
return this;
}
/**
* Whether to hide column headers in table output
*/
public Builder hideHeaders() {
this.showHeader = false;
return this;
}
/**
* Whether to show column headers in table output. This is the default.
*/
public Builder showHeaders() {
this.showHeader = true;
return this;
}
/**
* Set the maximum width of a row in the TableListing. Must have one or
* more wrappable fields for this to take effect.
*/
public Builder wrapWidth(int width) {
this.wrapWidth = width;
return this;
}
/**
* Create a new TableListing.
*/
public TableListing build() {
return new TableListing(columns.toArray(new Column[0]), showHeader,
wrapWidth);
}
}
private final Column columns[];
private int numRows;
private final boolean showHeader;
private final int wrapWidth;
TableListing(Column columns[], boolean showHeader, int wrapWidth) {
this.columns = columns;
this.numRows = 0;
this.showHeader = showHeader;
this.wrapWidth = wrapWidth;
}
/**
* Add a new row.
*
* @param row The row of objects to add-- one per column.
*/
public void addRow(String... row) {
if (row.length != columns.length) {
throw new RuntimeException("trying to add a row with " + row.length +
" columns, but we have " + columns.length + " columns.");
}
for (int i = 0; i < columns.length; i++) {
columns[i].addRow(row[i]);
}
numRows++;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
// Calculate the widths of each column based on their maxWidths and
// the wrapWidth for the entire table
int width = (columns.length-1)*2; // inter-column padding
for (int i=0; i<columns.length; i++) {
width += columns[i].maxWidth;
}
// Decrease the column size of wrappable columns until the goal width
// is reached, or we can't decrease anymore
while (width > wrapWidth) {
boolean modified = false;
for (int i=0; i<columns.length; i++) {
Column column = columns[i];
if (column.wrap) {
int maxWidth = column.getMaxWidth();
if (maxWidth > 4) {
column.setWrapWidth(maxWidth-1);
modified = true;
width -= 1;
if (width <= wrapWidth) {
break;
}
}
}
}
if (!modified) {
break;
}
}
int startrow = 0;
if (!showHeader) {
startrow = 1;
}
String[][] columnLines = new String[columns.length][];
for (int i = startrow; i < numRows + 1; i++) {
int maxColumnLines = 0;
for (int j = 0; j < columns.length; j++) {
columnLines[j] = columns[j].getRow(i);
if (columnLines[j].length > maxColumnLines) {
maxColumnLines = columnLines[j].length;
}
}
for (int c = 0; c < maxColumnLines; c++) {
// First column gets no left-padding
String prefix = "";
for (int j = 0; j < columns.length; j++) {
// Prepend padding
builder.append(prefix);
prefix = " ";
if (columnLines[j].length > c) {
builder.append(columnLines[j][c]);
} else {
builder.append(StringUtils.repeat(" ", columns[j].maxWidth));
}
}
builder.append("\n");
}
}
return builder.toString();
}
}
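/*
 * Usage sketch (illustrative only; the pool values are made up) showing how
 * the Builder assembles the listing from the class javadoc:
 *
 *   TableListing listing = new TableListing.Builder()
 *       .addField("NAME")
 *       .addField("OWNER")
 *       .addField("WEIGHT", TableListing.Justification.RIGHT)
 *       .build();
 *   listing.addRow("pool1", "andrew", "100");
 *   System.out.print(listing.toString());
 */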
| 8,093 | 27.4 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetGroupsBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Tool;
/**
* Base class for the HDFS and MR implementations of tools which fetch and
* display the groups that users belong to.
*/
public abstract class GetGroupsBase extends Configured implements Tool {
private PrintStream out;
/**
* Create an instance of this tool using the given configuration.
* @param conf
*/
protected GetGroupsBase(Configuration conf) {
this(conf, System.out);
}
/**
* Used exclusively for testing.
*
* @param conf The configuration to use.
* @param out The PrintStream to write to, instead of System.out
*/
protected GetGroupsBase(Configuration conf, PrintStream out) {
super(conf);
this.out = out;
}
/**
* Get the groups for the users given and print formatted output to the
* {@link PrintStream} configured earlier.
*/
@Override
public int run(String[] args) throws Exception {
if (args.length == 0) {
args = new String[] { UserGroupInformation.getCurrentUser().getUserName() };
}
for (String username : args) {
StringBuilder sb = new StringBuilder();
sb.append(username + " :");
for (String group : getUgmProtocol().getGroupsForUser(username)) {
sb.append(" ");
sb.append(group);
}
out.println(sb);
}
return 0;
}
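  /*
   * Example output line for a hypothetical user in two groups:
   *   alice : staff hadoop
   */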
/**
* Must be overridden by subclasses to get the address where the
* {@link GetUserMappingsProtocol} implementation is running.
*
* @param conf The configuration to use.
* @return The address where the service is listening.
* @throws IOException
*/
protected abstract InetSocketAddress getProtocolAddress(Configuration conf)
throws IOException;
/**
* Get a client of the {@link GetUserMappingsProtocol}.
* @return A {@link GetUserMappingsProtocol} client proxy.
* @throws IOException
*/
protected GetUserMappingsProtocol getUgmProtocol() throws IOException {
GetUserMappingsProtocol userGroupMappingProtocol =
RPC.getProxy(GetUserMappingsProtocol.class,
GetUserMappingsProtocol.versionID,
getProtocolAddress(getConf()), UserGroupInformation.getCurrentUser(),
getConf(), NetUtils.getSocketFactory(getConf(),
GetUserMappingsProtocol.class));
return userGroupMappingProtocol;
}
}
| 3,468 | 31.12037 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@ProtocolInfo(
protocolName = "org.apache.hadoop.tools.GetUserMappingsProtocol",
protocolVersion = 1)
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface GetUserMappingsProtocolPB extends
GetUserMappingsProtocolService.BlockingInterface {
}
| 1,617 | 41.578947 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class GetUserMappingsProtocolClientSideTranslatorPB implements
ProtocolMetaInterface, GetUserMappingsProtocol, Closeable {
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final GetUserMappingsProtocolPB rpcProxy;
public GetUserMappingsProtocolClientSideTranslatorPB(
GetUserMappingsProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);
}
@Override
public String[] getGroupsForUser(String user) throws IOException {
GetGroupsForUserRequestProto request = GetGroupsForUserRequestProto
.newBuilder().setUser(user).build();
GetGroupsForUserResponseProto resp;
try {
resp = rpcProxy.getGroupsForUser(NULL_CONTROLLER, request);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
return resp.getGroupsList().toArray(new String[resp.getGroupsCount()]);
}
@Override
public boolean isMethodSupported(String methodName) throws IOException {
return RpcClientUtil.isMethodSupported(rpcProxy,
GetUserMappingsProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(GetUserMappingsProtocolPB.class), methodName);
}
}
| 2,743 | 37.647887 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolServerSideTranslatorPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools.protocolPB;
import java.io.IOException;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class GetUserMappingsProtocolServerSideTranslatorPB implements
GetUserMappingsProtocolPB {
private final GetUserMappingsProtocol impl;
public GetUserMappingsProtocolServerSideTranslatorPB(
GetUserMappingsProtocol impl) {
this.impl = impl;
}
@Override
public GetGroupsForUserResponseProto getGroupsForUser(
RpcController controller, GetGroupsForUserRequestProto request)
throws ServiceException {
String[] groups;
try {
groups = impl.getGroupsForUser(request.getUser());
} catch (IOException e) {
throw new ServiceException(e);
}
GetGroupsForUserResponseProto.Builder builder = GetGroupsForUserResponseProto
.newBuilder();
for (String g : groups) {
builder.addGroups(g);
}
return builder.build();
}
}
| 2,028 | 33.982759 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import java.security.GeneralSecurityException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.util.PerformanceAdvisory;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT;
/**
* Crypto codec class, encapsulates encryptor/decryptor pair.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class CryptoCodec implements Configurable {
public static Logger LOG = LoggerFactory.getLogger(CryptoCodec.class);
/**
* Get crypto codec for specified algorithm/mode/padding.
*
* @param conf
* the configuration
* @param cipherSuite
* algorithm/mode/padding
   * @return CryptoCodec the codec object; null if no crypto codec classes
   *         are configured for the cipher suite.
*/
public static CryptoCodec getInstance(Configuration conf,
CipherSuite cipherSuite) {
List<Class<? extends CryptoCodec>> klasses = getCodecClasses(
conf, cipherSuite);
if (klasses == null) {
return null;
}
CryptoCodec codec = null;
for (Class<? extends CryptoCodec> klass : klasses) {
try {
CryptoCodec c = ReflectionUtils.newInstance(klass, conf);
if (c.getCipherSuite().getName().equals(cipherSuite.getName())) {
if (codec == null) {
PerformanceAdvisory.LOG.debug("Using crypto codec {}.",
klass.getName());
codec = c;
}
} else {
PerformanceAdvisory.LOG.debug(
"Crypto codec {} doesn't meet the cipher suite {}.",
klass.getName(), cipherSuite.getName());
}
} catch (Exception e) {
PerformanceAdvisory.LOG.debug("Crypto codec {} is not available.",
klass.getName());
}
}
return codec;
}
/**
* Get crypto codec for algorithm/mode/padding in config value
* hadoop.security.crypto.cipher.suite
*
* @param conf
* the configuration
   * @return CryptoCodec the codec object; null if no crypto codec classes
   *         are configured for the cipher suite.
*/
public static CryptoCodec getInstance(Configuration conf) {
String name = conf.get(HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY,
HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT);
return getInstance(conf, CipherSuite.convert(name));
}
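  /*
   * Minimal usage sketch (the suite name below is the standard
   * AES/CTR/NoPadding value; adjust for your deployment):
   *
   *   Configuration conf = new Configuration();
   *   conf.set("hadoop.security.crypto.cipher.suite", "AES/CTR/NoPadding");
   *   CryptoCodec codec = CryptoCodec.getInstance(conf);
   *   // codec is null if no codec class is configured for that suite.
   */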
private static List<Class<? extends CryptoCodec>> getCodecClasses(
Configuration conf, CipherSuite cipherSuite) {
List<Class<? extends CryptoCodec>> result = Lists.newArrayList();
String configName = HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX +
cipherSuite.getConfigSuffix();
String codecString;
if (configName.equals(CommonConfigurationKeysPublic
.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY)) {
codecString = conf.get(configName, CommonConfigurationKeysPublic
.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_DEFAULT);
} else {
codecString = conf.get(configName);
}
if (codecString == null) {
PerformanceAdvisory.LOG.debug(
"No crypto codec classes with cipher suite configured.");
return null;
}
for (String c : Splitter.on(',').trimResults().omitEmptyStrings().
split(codecString)) {
try {
Class<?> cls = conf.getClassByName(c);
result.add(cls.asSubclass(CryptoCodec.class));
} catch (ClassCastException e) {
PerformanceAdvisory.LOG.debug("Class {} is not a CryptoCodec.", c);
} catch (ClassNotFoundException e) {
PerformanceAdvisory.LOG.debug("Crypto codec {} not found.", c);
}
}
return result;
}
/**
* @return the CipherSuite for this codec.
*/
public abstract CipherSuite getCipherSuite();
/**
* Create a {@link org.apache.hadoop.crypto.Encryptor}.
* @return Encryptor the encryptor
*/
public abstract Encryptor createEncryptor() throws GeneralSecurityException;
/**
* Create a {@link org.apache.hadoop.crypto.Decryptor}.
* @return Decryptor the decryptor
*/
public abstract Decryptor createDecryptor() throws GeneralSecurityException;
/**
* This interface is only for Counter (CTR) mode. Generally the Encryptor
   * or Decryptor calculates the IV and maintains the encryption context
   * internally.
* For example a {@link javax.crypto.Cipher} will maintain its encryption
* context internally when we do encryption/decryption using the
* Cipher#update interface.
* <p/>
* Encryption/Decryption is not always on the entire file. For example,
* in Hadoop, a node may only decrypt a portion of a file (i.e. a split).
* In these situations, the counter is derived from the file position.
* <p/>
* The IV can be calculated by combining the initial IV and the counter with
* a lossless operation (concatenation, addition, or XOR).
* @see http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_.28CTR.29
*
* @param initIV initial IV
* @param counter counter for input stream position
* @param IV the IV for input stream position
*/
public abstract void calculateIV(byte[] initIV, long counter, byte[] IV);
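  /*
   * Worked example (illustrative): with the 16-byte AES block size, stream
   * position 8192 yields counter = 8192 / 16 = 512 and padding = 0. A typical
   * CTR codec then derives IV by adding the counter into the low-order bytes
   * of initIV; the exact combination is implementation-specific.
   */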
/**
* Generate a number of secure, random bytes suitable for cryptographic use.
* This method needs to be thread-safe.
*
* @param bytes byte array to populate with random data
*/
public abstract void generateSecureRandom(byte[] bytes);
}
| 7,009 | 37.306011 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import java.security.GeneralSecurityException;
import java.util.EnumSet;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.CanSetDropBehind;
import org.apache.hadoop.fs.CanSetReadahead;
import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
import org.apache.hadoop.fs.HasFileDescriptor;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.io.ByteBufferPool;
import com.google.common.base.Preconditions;
/**
* CryptoInputStream decrypts data. It is not thread-safe. AES CTR mode is
* required in order to ensure that the plain text and cipher text have a 1:1
* mapping. The decryption is buffer based. The key points of the decryption
 * are (1) calculating the counter and (2) the padding from the stream position:
* <p/>
* counter = base + pos/(algorithm blocksize);
* padding = pos%(algorithm blocksize);
* <p/>
* The underlying stream offset is maintained as state.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class CryptoInputStream extends FilterInputStream implements
Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor,
CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess,
ReadableByteChannel {
private final byte[] oneByteBuf = new byte[1];
private final CryptoCodec codec;
private final Decryptor decryptor;
private final int bufferSize;
/**
* Input data buffer. The data starts at inBuffer.position() and ends at
   * inBuffer.limit().
*/
private ByteBuffer inBuffer;
/**
* The decrypted data buffer. The data starts at outBuffer.position() and
* ends at outBuffer.limit();
*/
private ByteBuffer outBuffer;
private long streamOffset = 0; // Underlying stream offset.
/**
* Whether the underlying stream supports
* {@link org.apache.hadoop.fs.ByteBufferReadable}
*/
private Boolean usingByteBufferRead = null;
/**
* Padding = pos%(algorithm blocksize); Padding is put into {@link #inBuffer}
* before any other data goes in. The purpose of padding is to put the input
   * data at the proper position.
*/
private byte padding;
private boolean closed;
private final byte[] key;
private final byte[] initIV;
private byte[] iv;
private final boolean isByteBufferReadable;
private final boolean isReadableByteChannel;
/** DirectBuffer pool */
private final Queue<ByteBuffer> bufferPool =
new ConcurrentLinkedQueue<ByteBuffer>();
/** Decryptor pool */
private final Queue<Decryptor> decryptorPool =
new ConcurrentLinkedQueue<Decryptor>();
public CryptoInputStream(InputStream in, CryptoCodec codec,
int bufferSize, byte[] key, byte[] iv) throws IOException {
this(in, codec, bufferSize, key, iv,
CryptoStreamUtils.getInputStreamOffset(in));
}
public CryptoInputStream(InputStream in, CryptoCodec codec,
int bufferSize, byte[] key, byte[] iv, long streamOffset) throws IOException {
super(in);
CryptoStreamUtils.checkCodec(codec);
this.bufferSize = CryptoStreamUtils.checkBufferSize(codec, bufferSize);
this.codec = codec;
this.key = key.clone();
this.initIV = iv.clone();
this.iv = iv.clone();
this.streamOffset = streamOffset;
isByteBufferReadable = in instanceof ByteBufferReadable;
isReadableByteChannel = in instanceof ReadableByteChannel;
inBuffer = ByteBuffer.allocateDirect(this.bufferSize);
outBuffer = ByteBuffer.allocateDirect(this.bufferSize);
decryptor = getDecryptor();
resetStreamOffset(streamOffset);
}
public CryptoInputStream(InputStream in, CryptoCodec codec,
byte[] key, byte[] iv) throws IOException {
this(in, codec, CryptoStreamUtils.getBufferSize(codec.getConf()), key, iv);
}
public InputStream getWrappedStream() {
return in;
}
/**
* Decryption is buffer based.
* If there is data in {@link #outBuffer}, then read it out of this buffer.
* If there is no data in {@link #outBuffer}, then read more from the
* underlying stream and do the decryption.
* @param b the buffer into which the decrypted data is read.
* @param off the buffer offset.
* @param len the maximum number of decrypted data bytes to read.
* @return int the total number of decrypted data bytes read into the buffer.
* @throws IOException
*/
@Override
public int read(byte[] b, int off, int len) throws IOException {
checkStream();
if (b == null) {
throw new NullPointerException();
} else if (off < 0 || len < 0 || len > b.length - off) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return 0;
}
final int remaining = outBuffer.remaining();
if (remaining > 0) {
int n = Math.min(len, remaining);
outBuffer.get(b, off, n);
return n;
} else {
int n = 0;
/*
* Check whether the underlying stream is {@link ByteBufferReadable},
* it can avoid bytes copy.
*/
if (usingByteBufferRead == null) {
if (isByteBufferReadable || isReadableByteChannel) {
try {
n = isByteBufferReadable ?
((ByteBufferReadable) in).read(inBuffer) :
((ReadableByteChannel) in).read(inBuffer);
usingByteBufferRead = Boolean.TRUE;
} catch (UnsupportedOperationException e) {
usingByteBufferRead = Boolean.FALSE;
}
} else {
usingByteBufferRead = Boolean.FALSE;
}
if (!usingByteBufferRead) {
n = readFromUnderlyingStream(inBuffer);
}
} else {
if (usingByteBufferRead) {
n = isByteBufferReadable ? ((ByteBufferReadable) in).read(inBuffer) :
((ReadableByteChannel) in).read(inBuffer);
} else {
n = readFromUnderlyingStream(inBuffer);
}
}
if (n <= 0) {
return n;
}
streamOffset += n; // Read n bytes
decrypt(decryptor, inBuffer, outBuffer, padding);
padding = afterDecryption(decryptor, inBuffer, streamOffset, iv);
n = Math.min(len, outBuffer.remaining());
outBuffer.get(b, off, n);
return n;
}
}
/** Read data from underlying stream. */
private int readFromUnderlyingStream(ByteBuffer inBuffer) throws IOException {
final int toRead = inBuffer.remaining();
final byte[] tmp = getTmpBuf();
final int n = in.read(tmp, 0, toRead);
if (n > 0) {
inBuffer.put(tmp, 0, n);
}
return n;
}
private byte[] tmpBuf;
private byte[] getTmpBuf() {
if (tmpBuf == null) {
tmpBuf = new byte[bufferSize];
}
return tmpBuf;
}
/**
* Do the decryption using inBuffer as input and outBuffer as output.
* Upon return, inBuffer is cleared; the decrypted data starts at
* outBuffer.position() and ends at outBuffer.limit();
*/
private void decrypt(Decryptor decryptor, ByteBuffer inBuffer,
ByteBuffer outBuffer, byte padding) throws IOException {
Preconditions.checkState(inBuffer.position() >= padding);
if(inBuffer.position() == padding) {
// There is no real data in inBuffer.
return;
}
inBuffer.flip();
outBuffer.clear();
decryptor.decrypt(inBuffer, outBuffer);
inBuffer.clear();
outBuffer.flip();
if (padding > 0) {
/*
* The plain text and cipher text have a 1:1 mapping, they start at the
* same position.
*/
outBuffer.position(padding);
}
}
/**
* This method is executed immediately after decryption. Check whether
* decryptor should be updated and recalculate padding if needed.
*/
private byte afterDecryption(Decryptor decryptor, ByteBuffer inBuffer,
long position, byte[] iv) throws IOException {
byte padding = 0;
if (decryptor.isContextReset()) {
/*
* This code is generally not executed since the decryptor usually
* maintains decryption context (e.g. the counter) internally. However,
* some implementations can't maintain context so a re-init is necessary
* after each decryption call.
*/
updateDecryptor(decryptor, position, iv);
padding = getPadding(position);
inBuffer.position(padding);
}
return padding;
}
private long getCounter(long position) {
return position / codec.getCipherSuite().getAlgorithmBlockSize();
}
private byte getPadding(long position) {
return (byte)(position % codec.getCipherSuite().getAlgorithmBlockSize());
}
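  /*
   * Example of the position arithmetic (assuming 16-byte AES blocks):
   * position 100 gives counter = 100 / 16 = 6 and padding = 100 % 16 = 4,
   * so four placeholder bytes sit at the head of inBuffer before real input.
   */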
/** Calculate the counter and iv, update the decryptor. */
private void updateDecryptor(Decryptor decryptor, long position, byte[] iv)
throws IOException {
final long counter = getCounter(position);
codec.calculateIV(initIV, counter, iv);
decryptor.init(key, iv);
}
/**
* Reset the underlying stream offset; clear {@link #inBuffer} and
   * {@link #outBuffer}. This typically happens during {@link #seek(long)}
* or {@link #skip(long)}.
*/
private void resetStreamOffset(long offset) throws IOException {
streamOffset = offset;
inBuffer.clear();
outBuffer.clear();
outBuffer.limit(0);
updateDecryptor(decryptor, offset, iv);
padding = getPadding(offset);
inBuffer.position(padding); // Set proper position for input data.
}
@Override
public void close() throws IOException {
if (closed) {
return;
}
super.close();
freeBuffers();
closed = true;
}
  /** Positioned read. It is thread-safe. */
@Override
public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
checkStream();
try {
final int n = ((PositionedReadable) in).read(position, buffer, offset,
length);
if (n > 0) {
// This operation does not change the current offset of the file
decrypt(position, buffer, offset, n);
}
return n;
} catch (ClassCastException e) {
throw new UnsupportedOperationException("This stream does not support " +
"positioned read.");
}
}
/**
* Decrypt length bytes in buffer starting at offset. Output is also put
* into buffer starting at offset. It is thread-safe.
*/
private void decrypt(long position, byte[] buffer, int offset, int length)
throws IOException {
ByteBuffer inBuffer = getBuffer();
ByteBuffer outBuffer = getBuffer();
Decryptor decryptor = null;
try {
decryptor = getDecryptor();
byte[] iv = initIV.clone();
updateDecryptor(decryptor, position, iv);
byte padding = getPadding(position);
inBuffer.position(padding); // Set proper position for input data.
int n = 0;
while (n < length) {
int toDecrypt = Math.min(length - n, inBuffer.remaining());
inBuffer.put(buffer, offset + n, toDecrypt);
// Do decryption
decrypt(decryptor, inBuffer, outBuffer, padding);
outBuffer.get(buffer, offset + n, toDecrypt);
n += toDecrypt;
padding = afterDecryption(decryptor, inBuffer, position + n, iv);
}
} finally {
returnBuffer(inBuffer);
returnBuffer(outBuffer);
returnDecryptor(decryptor);
}
}
  /** Positioned read fully. It is thread-safe. */
@Override
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException {
checkStream();
try {
((PositionedReadable) in).readFully(position, buffer, offset, length);
if (length > 0) {
// This operation does not change the current offset of the file
decrypt(position, buffer, offset, length);
}
} catch (ClassCastException e) {
throw new UnsupportedOperationException("This stream does not support " +
"positioned readFully.");
}
}
@Override
public void readFully(long position, byte[] buffer) throws IOException {
readFully(position, buffer, 0, buffer.length);
}
/** Seek to a position. */
@Override
public void seek(long pos) throws IOException {
Preconditions.checkArgument(pos >= 0, "Cannot seek to negative offset.");
checkStream();
try {
/*
* If data of target pos in the underlying stream has already been read
* and decrypted in outBuffer, we just need to re-position outBuffer.
*/
if (pos <= streamOffset && pos >= (streamOffset - outBuffer.remaining())) {
int forward = (int) (pos - (streamOffset - outBuffer.remaining()));
if (forward > 0) {
outBuffer.position(outBuffer.position() + forward);
}
} else {
((Seekable) in).seek(pos);
resetStreamOffset(pos);
}
} catch (ClassCastException e) {
throw new UnsupportedOperationException("This stream does not support " +
"seek.");
}
}
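  // Illustrative example: if streamOffset = 1000 and outBuffer still holds
  // 100 decrypted-but-unread bytes, seek(950) lands inside [900, 1000] and
  // merely advances outBuffer by 50, whereas seek(2000) triggers a real seek
  // on the underlying stream plus a decryptor reset.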
/** Skip n bytes */
@Override
public long skip(long n) throws IOException {
Preconditions.checkArgument(n >= 0, "Negative skip length.");
checkStream();
if (n == 0) {
return 0;
} else if (n <= outBuffer.remaining()) {
int pos = outBuffer.position() + (int) n;
outBuffer.position(pos);
return n;
} else {
/*
* Subtract outBuffer.remaining() to see how many bytes we need to
* skip in the underlying stream. Add outBuffer.remaining() to the
* actual number of skipped bytes in the underlying stream to get the
* number of skipped bytes from the user's point of view.
*/
n -= outBuffer.remaining();
long skipped = in.skip(n);
if (skipped < 0) {
skipped = 0;
}
long pos = streamOffset + skipped;
skipped += outBuffer.remaining();
resetStreamOffset(pos);
return skipped;
}
}
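  // Illustrative example: with 20 decrypted bytes left in outBuffer,
  // skip(50) consumes those 20, asks the underlying stream to skip 30 more,
  // and reports 50 bytes skipped (less if the underlying skip fell short).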
/** Get underlying stream position. */
@Override
public long getPos() throws IOException {
checkStream();
// Equals: ((Seekable) in).getPos() - outBuffer.remaining()
return streamOffset - outBuffer.remaining();
}
/** ByteBuffer read. */
@Override
public int read(ByteBuffer buf) throws IOException {
checkStream();
if (isByteBufferReadable || isReadableByteChannel) {
final int unread = outBuffer.remaining();
if (unread > 0) { // Have unread decrypted data in buffer.
int toRead = buf.remaining();
if (toRead <= unread) {
final int limit = outBuffer.limit();
outBuffer.limit(outBuffer.position() + toRead);
buf.put(outBuffer);
outBuffer.limit(limit);
return toRead;
} else {
buf.put(outBuffer);
}
}
final int pos = buf.position();
final int n = isByteBufferReadable ? ((ByteBufferReadable) in).read(buf) :
((ReadableByteChannel) in).read(buf);
if (n > 0) {
streamOffset += n; // Read n bytes
decrypt(buf, n, pos);
}
if (n >= 0) {
return unread + n;
} else {
if (unread == 0) {
return -1;
} else {
return unread;
}
}
} else {
int n = 0;
if (buf.hasArray()) {
n = read(buf.array(), buf.position(), buf.remaining());
if (n > 0) {
buf.position(buf.position() + n);
}
} else {
byte[] tmp = new byte[buf.remaining()];
n = read(tmp);
if (n > 0) {
buf.put(tmp, 0, n);
}
}
return n;
}
}
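  // Illustrative example: if 10 decrypted bytes are buffered in outBuffer
  // and the caller asks for 100, the 10 are copied out first, up to 90 more
  // are read from the underlying stream, decrypted in place, and the
  // combined count is returned.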
/**
* Decrypt all data in buf: total n bytes from given start position.
   * Output is written back into buf at the same start position.
* buf.position() and buf.limit() should be unchanged after decryption.
*/
private void decrypt(ByteBuffer buf, int n, int start)
throws IOException {
final int pos = buf.position();
final int limit = buf.limit();
int len = 0;
while (len < n) {
buf.position(start + len);
buf.limit(start + len + Math.min(n - len, inBuffer.remaining()));
inBuffer.put(buf);
// Do decryption
try {
decrypt(decryptor, inBuffer, outBuffer, padding);
buf.position(start + len);
buf.limit(limit);
len += outBuffer.remaining();
buf.put(outBuffer);
} finally {
padding = afterDecryption(decryptor, inBuffer, streamOffset - (n - len), iv);
}
}
buf.position(pos);
}
@Override
public int available() throws IOException {
checkStream();
return in.available() + outBuffer.remaining();
}
@Override
public boolean markSupported() {
return false;
}
@Override
public void mark(int readLimit) {
}
@Override
public void reset() throws IOException {
throw new IOException("Mark/reset not supported");
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
Preconditions.checkArgument(targetPos >= 0,
"Cannot seek to negative offset.");
checkStream();
try {
boolean result = ((Seekable) in).seekToNewSource(targetPos);
resetStreamOffset(targetPos);
return result;
} catch (ClassCastException e) {
throw new UnsupportedOperationException("This stream does not support " +
"seekToNewSource.");
}
}
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
EnumSet<ReadOption> opts) throws IOException,
UnsupportedOperationException {
checkStream();
try {
if (outBuffer.remaining() > 0) {
// Have some decrypted data unread, need to reset.
((Seekable) in).seek(getPos());
resetStreamOffset(getPos());
}
final ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in).
read(bufferPool, maxLength, opts);
if (buffer != null) {
final int n = buffer.remaining();
if (n > 0) {
streamOffset += buffer.remaining(); // Read n bytes
final int pos = buffer.position();
decrypt(buffer, n, pos);
}
}
return buffer;
} catch (ClassCastException e) {
throw new UnsupportedOperationException("This stream does not support " +
"enhanced byte buffer access.");
}
}
@Override
public void releaseBuffer(ByteBuffer buffer) {
try {
((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
} catch (ClassCastException e) {
throw new UnsupportedOperationException("This stream does not support " +
"release buffer.");
}
}
@Override
public void setReadahead(Long readahead) throws IOException,
UnsupportedOperationException {
try {
((CanSetReadahead) in).setReadahead(readahead);
} catch (ClassCastException e) {
throw new UnsupportedOperationException("This stream does not support " +
"setting the readahead caching strategy.");
}
}
@Override
public void setDropBehind(Boolean dropCache) throws IOException,
UnsupportedOperationException {
try {
((CanSetDropBehind) in).setDropBehind(dropCache);
} catch (ClassCastException e) {
throw new UnsupportedOperationException("This stream does not " +
"support setting the drop-behind caching setting.");
}
}
@Override
public FileDescriptor getFileDescriptor() throws IOException {
if (in instanceof HasFileDescriptor) {
return ((HasFileDescriptor) in).getFileDescriptor();
} else if (in instanceof FileInputStream) {
return ((FileInputStream) in).getFD();
} else {
return null;
}
}
@Override
public int read() throws IOException {
return (read(oneByteBuf, 0, 1) == -1) ? -1 : (oneByteBuf[0] & 0xff);
}
private void checkStream() throws IOException {
if (closed) {
throw new IOException("Stream closed");
}
}
/** Get direct buffer from pool */
private ByteBuffer getBuffer() {
ByteBuffer buffer = bufferPool.poll();
if (buffer == null) {
buffer = ByteBuffer.allocateDirect(bufferSize);
}
return buffer;
}
/** Return direct buffer to pool */
private void returnBuffer(ByteBuffer buf) {
if (buf != null) {
buf.clear();
bufferPool.add(buf);
}
}
/** Forcibly free the direct buffers. */
private void freeBuffers() {
CryptoStreamUtils.freeDB(inBuffer);
CryptoStreamUtils.freeDB(outBuffer);
cleanBufferPool();
}
/** Clean direct buffer pool */
private void cleanBufferPool() {
ByteBuffer buf;
while ((buf = bufferPool.poll()) != null) {
CryptoStreamUtils.freeDB(buf);
}
}
/** Get decryptor from pool */
private Decryptor getDecryptor() throws IOException {
Decryptor decryptor = decryptorPool.poll();
if (decryptor == null) {
try {
decryptor = codec.createDecryptor();
} catch (GeneralSecurityException e) {
throw new IOException(e);
}
}
return decryptor;
}
/** Return decryptor to pool */
private void returnDecryptor(Decryptor decryptor) {
if (decryptor != null) {
decryptorPool.add(decryptor);
}
}
@Override
public boolean isOpen() {
return !closed;
}
}
| 22,467 | 30.292479 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import java.nio.ByteBuffer;
import java.security.NoSuchAlgorithmException;
import java.util.StringTokenizer;
import javax.crypto.BadPaddingException;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.ShortBufferException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.base.Preconditions;
/**
* OpenSSL cipher using JNI.
 * Currently only AES-CTR is supported. The design leaves room to add
 * other crypto algorithms/modes.
*/
@InterfaceAudience.Private
public final class OpensslCipher {
private static final Log LOG =
LogFactory.getLog(OpensslCipher.class.getName());
public static final int ENCRYPT_MODE = 1;
public static final int DECRYPT_MODE = 0;
  /** Currently only AES/CTR/NoPadding is supported. */
private static enum AlgMode {
AES_CTR;
static int get(String algorithm, String mode)
throws NoSuchAlgorithmException {
try {
return AlgMode.valueOf(algorithm + "_" + mode).ordinal();
} catch (Exception e) {
throw new NoSuchAlgorithmException("Doesn't support algorithm: " +
algorithm + " and mode: " + mode);
}
}
}
private static enum Padding {
NoPadding;
static int get(String padding) throws NoSuchPaddingException {
try {
return Padding.valueOf(padding).ordinal();
} catch (Exception e) {
throw new NoSuchPaddingException("Doesn't support padding: " + padding);
}
}
}
private long context = 0;
private final int alg;
private final int padding;
private static final String loadingFailureReason;
static {
String loadingFailure = null;
try {
if (!NativeCodeLoader.buildSupportsOpenssl()) {
loadingFailure = "build does not support openssl.";
} else {
initIDs();
}
} catch (Throwable t) {
loadingFailure = t.getMessage();
LOG.debug("Failed to load OpenSSL Cipher.", t);
} finally {
loadingFailureReason = loadingFailure;
}
}
public static String getLoadingFailureReason() {
return loadingFailureReason;
}
private OpensslCipher(long context, int alg, int padding) {
this.context = context;
this.alg = alg;
this.padding = padding;
}
/**
   * Return an <code>OpensslCipher</code> object that implements the specified
* transformation.
*
* @param transformation the name of the transformation, e.g.,
* AES/CTR/NoPadding.
   * @return OpensslCipher an <code>OpensslCipher</code> object
* @throws NoSuchAlgorithmException if <code>transformation</code> is null,
* empty, in an invalid format, or if Openssl doesn't implement the
* specified algorithm.
* @throws NoSuchPaddingException if <code>transformation</code> contains
* a padding scheme that is not available.
*/
public static final OpensslCipher getInstance(String transformation)
throws NoSuchAlgorithmException, NoSuchPaddingException {
Transform transform = tokenizeTransformation(transformation);
int algMode = AlgMode.get(transform.alg, transform.mode);
int padding = Padding.get(transform.padding);
long context = initContext(algMode, padding);
return new OpensslCipher(context, algMode, padding);
}
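  // Typical usage sketch (illustrative; key, iv and the direct ByteBuffers
  // in/out are assumed to be supplied by the caller):
  //   OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
  //   cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);
  //   cipher.update(in, out);  // repeat per buffer pair
  //   cipher.clean();          // release the native context when done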
/** Nested class for algorithm, mode and padding. */
private static class Transform {
final String alg;
final String mode;
final String padding;
public Transform(String alg, String mode, String padding) {
this.alg = alg;
this.mode = mode;
this.padding = padding;
}
}
private static Transform tokenizeTransformation(String transformation)
throws NoSuchAlgorithmException {
if (transformation == null) {
throw new NoSuchAlgorithmException("No transformation given.");
}
/*
* Array containing the components of a Cipher transformation:
*
* index 0: algorithm (e.g., AES)
* index 1: mode (e.g., CTR)
* index 2: padding (e.g., NoPadding)
*/
String[] parts = new String[3];
int count = 0;
StringTokenizer parser = new StringTokenizer(transformation, "/");
while (parser.hasMoreTokens() && count < 3) {
parts[count++] = parser.nextToken().trim();
}
if (count != 3 || parser.hasMoreTokens()) {
throw new NoSuchAlgorithmException("Invalid transformation format: " +
transformation);
}
return new Transform(parts[0], parts[1], parts[2]);
}
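  // Example: "AES/CTR/NoPadding" tokenizes to alg = "AES", mode = "CTR",
  // padding = "NoPadding"; anything with fewer or more than three parts,
  // such as "AES/CTR", is rejected with NoSuchAlgorithmException.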
/**
* Initialize this cipher with a key and IV.
*
* @param mode {@link #ENCRYPT_MODE} or {@link #DECRYPT_MODE}
* @param key crypto key
* @param iv crypto iv
*/
public void init(int mode, byte[] key, byte[] iv) {
context = init(context, mode, alg, padding, key, iv);
}
/**
* Continues a multiple-part encryption or decryption operation. The data
* is encrypted or decrypted, depending on how this cipher was initialized.
* <p/>
*
* All <code>input.remaining()</code> bytes starting at
* <code>input.position()</code> are processed. The result is stored in
* the output buffer.
* <p/>
*
* Upon return, the input buffer's position will be equal to its limit;
* its limit will not have changed. The output buffer's position will have
   * advanced by n, where n is the value returned by this method; the output
* buffer's limit will not have changed.
* <p/>
*
* If <code>output.remaining()</code> bytes are insufficient to hold the
* result, a <code>ShortBufferException</code> is thrown.
*
* @param input the input ByteBuffer
* @param output the output ByteBuffer
* @return int number of bytes stored in <code>output</code>
* @throws ShortBufferException if there is insufficient space in the
* output buffer
*/
public int update(ByteBuffer input, ByteBuffer output)
throws ShortBufferException {
checkState();
Preconditions.checkArgument(input.isDirect() && output.isDirect(),
"Direct buffers are required.");
int len = update(context, input, input.position(), input.remaining(),
output, output.position(), output.remaining());
input.position(input.limit());
output.position(output.position() + len);
return len;
}
/**
* Finishes a multiple-part operation. The data is encrypted or decrypted,
* depending on how this cipher was initialized.
* <p/>
*
* The result is stored in the output buffer. Upon return, the output buffer's
* position will have advanced by n, where n is the value returned by this
* method; the output buffer's limit will not have changed.
* <p/>
*
* If <code>output.remaining()</code> bytes are insufficient to hold the result,
* a <code>ShortBufferException</code> is thrown.
* <p/>
*
* Upon finishing, this method resets this cipher object to the state it was
* in when previously initialized. That is, the object is available to encrypt
* or decrypt more data.
* <p/>
*
   * If any exception is thrown, this cipher object needs to be reset before it
* can be used again.
*
* @param output the output ByteBuffer
* @return int number of bytes stored in <code>output</code>
* @throws ShortBufferException
* @throws IllegalBlockSizeException
* @throws BadPaddingException
*/
public int doFinal(ByteBuffer output) throws ShortBufferException,
IllegalBlockSizeException, BadPaddingException {
checkState();
Preconditions.checkArgument(output.isDirect(), "Direct buffer is required.");
int len = doFinal(context, output, output.position(), output.remaining());
output.position(output.position() + len);
return len;
}
/** Forcibly clean the context. */
public void clean() {
if (context != 0) {
clean(context);
context = 0;
}
}
/** Check whether context is initialized. */
private void checkState() {
Preconditions.checkState(context != 0);
}
@Override
protected void finalize() throws Throwable {
clean();
}
private native static void initIDs();
private native static long initContext(int alg, int padding);
private native long init(long context, int mode, int alg, int padding,
byte[] key, byte[] iv);
private native int update(long context, ByteBuffer input, int inputOffset,
int inputLength, ByteBuffer output, int outputOffset, int maxOutputLength);
private native int doFinal(long context, ByteBuffer output, int offset,
int maxOutputLength);
private native void clean(long context);
public native static String getLibraryName();
}
| 9,620 | 32.40625 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Preconditions;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class AesCtrCryptoCodec extends CryptoCodec {
protected static final CipherSuite SUITE = CipherSuite.AES_CTR_NOPADDING;
/**
   * For AES, the algorithm block size is fixed at 128 bits.
* @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
*/
private static final int AES_BLOCK_SIZE = SUITE.getAlgorithmBlockSize();
@Override
public CipherSuite getCipherSuite() {
return SUITE;
}
/**
   * The IV is produced by adding the counter to the initial IV. The IV
   * length should be the same as {@link #AES_BLOCK_SIZE}.
*/
@Override
public void calculateIV(byte[] initIV, long counter, byte[] IV) {
Preconditions.checkArgument(initIV.length == AES_BLOCK_SIZE);
Preconditions.checkArgument(IV.length == AES_BLOCK_SIZE);
int i = IV.length; // IV length
int j = 0; // counter bytes index
int sum = 0;
while (i-- > 0) {
// (sum >>> Byte.SIZE) is the carry for addition
sum = (initIV[i] & 0xff) + (sum >>> Byte.SIZE);
      if (j++ < 8) { // Big-endian; a long is 8 bytes
sum += (byte) counter & 0xff;
counter >>>= 8;
}
IV[i] = (byte) sum;
}
}
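  // Illustrative trace: with an all-zero 16-byte initIV, counter = 2 yields
  // an IV ending in 0x02; if the low initIV byte were 0xFF and counter = 1,
  // the carry would propagate into the next byte, exactly like 128-bit
  // big-endian addition.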
}
| 2,239 | 33.461538 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.StringUtils;
/**
* Defines properties of a CipherSuite. Modeled after the ciphers in
* {@link javax.crypto.Cipher}.
*/
@InterfaceAudience.Private
public enum CipherSuite {
UNKNOWN("Unknown", 0),
AES_CTR_NOPADDING("AES/CTR/NoPadding", 16);
private final String name;
private final int algoBlockSize;
private Integer unknownValue = null;
CipherSuite(String name, int algoBlockSize) {
this.name = name;
this.algoBlockSize = algoBlockSize;
}
public void setUnknownValue(int unknown) {
this.unknownValue = unknown;
}
public int getUnknownValue() {
return unknownValue;
}
/**
* @return name of cipher suite, as in {@link javax.crypto.Cipher}
*/
public String getName() {
return name;
}
/**
* @return size of an algorithm block in bytes
*/
public int getAlgorithmBlockSize() {
return algoBlockSize;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder("{");
builder.append("name: " + name);
builder.append(", algorithmBlockSize: " + algoBlockSize);
if (unknownValue != null) {
builder.append(", unknownValue: " + unknownValue);
}
builder.append("}");
return builder.toString();
}
/**
   * Convert a name to a CipherSuite. {@link #algoBlockSize} is fixed for a
   * given cipher suite, so only the name needs to be compared.
* @param name cipher suite name
* @return CipherSuite cipher suite
*/
public static CipherSuite convert(String name) {
CipherSuite[] suites = CipherSuite.values();
for (CipherSuite suite : suites) {
if (suite.getName().equals(name)) {
return suite;
}
}
throw new IllegalArgumentException("Invalid cipher suite name: " + name);
}
/**
* Returns suffix of cipher suite configuration.
* @return String configuration suffix
*/
public String getConfigSuffix() {
String[] parts = name.split("/");
StringBuilder suffix = new StringBuilder();
for (String part : parts) {
suffix.append(".").append(StringUtils.toLowerCase(part));
}
return suffix.toString();
}
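  // Example: for AES_CTR_NOPADDING the name "AES/CTR/NoPadding" yields the
  // configuration suffix ".aes.ctr.nopadding".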
}
| 3,047 | 27.485981 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.GeneralSecurityException;
import java.security.SecureRandom;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import com.google.common.base.Preconditions;
import org.apache.hadoop.crypto.random.OsSecureRandom;
import org.apache.hadoop.util.ReflectionUtils;
/**
* Implement the AES-CTR crypto codec using JNI into OpenSSL.
*/
@InterfaceAudience.Private
public class OpensslAesCtrCryptoCodec extends AesCtrCryptoCodec {
private static final Log LOG =
LogFactory.getLog(OpensslAesCtrCryptoCodec.class.getName());
private Configuration conf;
private Random random;
public OpensslAesCtrCryptoCodec() {
String loadingFailureReason = OpensslCipher.getLoadingFailureReason();
if (loadingFailureReason != null) {
throw new RuntimeException(loadingFailureReason);
}
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
final Class<? extends Random> klass = conf.getClass(
HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY, OsSecureRandom.class,
Random.class);
try {
random = ReflectionUtils.newInstance(klass, conf);
} catch (Exception e) {
LOG.info("Unable to use " + klass.getName() + ". Falling back to " +
"Java SecureRandom.", e);
this.random = new SecureRandom();
}
}
@Override
protected void finalize() throws Throwable {
try {
Closeable r = (Closeable) this.random;
r.close();
} catch (ClassCastException e) {
}
super.finalize();
}
@Override
public Configuration getConf() {
return conf;
}
@Override
public Encryptor createEncryptor() throws GeneralSecurityException {
return new OpensslAesCtrCipher(OpensslCipher.ENCRYPT_MODE);
}
@Override
public Decryptor createDecryptor() throws GeneralSecurityException {
return new OpensslAesCtrCipher(OpensslCipher.DECRYPT_MODE);
}
@Override
public void generateSecureRandom(byte[] bytes) {
random.nextBytes(bytes);
}
private static class OpensslAesCtrCipher implements Encryptor, Decryptor {
private final OpensslCipher cipher;
private final int mode;
private boolean contextReset = false;
public OpensslAesCtrCipher(int mode) throws GeneralSecurityException {
this.mode = mode;
cipher = OpensslCipher.getInstance(SUITE.getName());
}
@Override
public void init(byte[] key, byte[] iv) throws IOException {
Preconditions.checkNotNull(key);
Preconditions.checkNotNull(iv);
contextReset = false;
cipher.init(mode, key, iv);
}
/**
* AES-CTR will consume all of the input data. It requires enough space in
     * the destination buffer to encrypt the entire input buffer.
*/
@Override
public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
throws IOException {
process(inBuffer, outBuffer);
}
/**
* AES-CTR will consume all of the input data. It requires enough space in
     * the destination buffer to decrypt the entire input buffer.
*/
@Override
public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
throws IOException {
process(inBuffer, outBuffer);
}
private void process(ByteBuffer inBuffer, ByteBuffer outBuffer)
throws IOException {
try {
int inputSize = inBuffer.remaining();
// OpensslCipher#update will maintain crypto context.
int n = cipher.update(inBuffer, outBuffer);
if (n < inputSize) {
/**
* Typically code will not get here. OpensslCipher#update will
* consume all input data and put result in outBuffer.
* OpensslCipher#doFinal will reset the crypto context.
*/
contextReset = true;
cipher.doFinal(outBuffer);
}
} catch (Exception e) {
throw new IOException(e);
}
}
@Override
public boolean isContextReset() {
return contextReset;
}
}
}
| 5,179 | 30.393939 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoProtocolVersion.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Versions of the client/server protocol used for HDFS encryption.
*/
@InterfaceAudience.Private
public enum CryptoProtocolVersion {
UNKNOWN("Unknown", 1),
ENCRYPTION_ZONES("Encryption zones", 2);
private final String description;
private final int version;
private Integer unknownValue = null;
private static CryptoProtocolVersion[] supported = {ENCRYPTION_ZONES};
/**
* @return Array of supported protocol versions.
*/
public static CryptoProtocolVersion[] supported() {
return supported;
}
CryptoProtocolVersion(String description, int version) {
this.description = description;
this.version = version;
}
/**
   * Returns whether a given protocol version is supported.
*
* @param version version number
* @return true if the version is supported, else false
*/
public static boolean supports(CryptoProtocolVersion version) {
if (version.getVersion() == UNKNOWN.getVersion()) {
return false;
}
for (CryptoProtocolVersion v : CryptoProtocolVersion.values()) {
if (v.getVersion() == version.getVersion()) {
return true;
}
}
return false;
}
public void setUnknownValue(int unknown) {
this.unknownValue = unknown;
}
public int getUnknownValue() {
return unknownValue;
}
public String getDescription() {
return description;
}
public int getVersion() {
return version;
}
@Override
public String toString() {
return "CryptoProtocolVersion{" +
"description='" + description + '\'' +
", version=" + version +
", unknownValue=" + unknownValue +
'}';
}
}
| 2,537 | 26.89011 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/UnsupportedCodecException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
/**
* Thrown to indicate that the specific codec is not supported.
*/
public class UnsupportedCodecException extends RuntimeException {
/** Default constructor */
public UnsupportedCodecException() {
}
/**
* Constructs an UnsupportedCodecException with the specified
* detail message.
*
* @param message the detail message
*/
public UnsupportedCodecException(String message) {
super(message);
}
/**
* Constructs a new exception with the specified detail message and
* cause.
*
* @param message the detail message
* @param cause the cause
*/
public UnsupportedCodecException(String message, Throwable cause) {
super(message, cause);
}
/**
* Constructs a new exception with the specified cause.
*
* @param cause the cause
*/
public UnsupportedCodecException(Throwable cause) {
super(cause);
}
private static final long serialVersionUID = 6713920435487942224L;
}
| 1,794 | 28.916667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherOption.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Used between client and server to negotiate the
* cipher suite, key and iv.
*/
@InterfaceAudience.Private
public class CipherOption {
private final CipherSuite suite;
private final byte[] inKey;
private final byte[] inIv;
private final byte[] outKey;
private final byte[] outIv;
public CipherOption(CipherSuite suite) {
this(suite, null, null, null, null);
}
public CipherOption(CipherSuite suite, byte[] inKey, byte[] inIv,
byte[] outKey, byte[] outIv) {
this.suite = suite;
this.inKey = inKey;
this.inIv = inIv;
this.outKey = outKey;
this.outIv = outIv;
}
public CipherSuite getCipherSuite() {
return suite;
}
public byte[] getInKey() {
return inKey;
}
public byte[] getInIv() {
return inIv;
}
public byte[] getOutKey() {
return outKey;
}
public byte[] getOutIv() {
return outIv;
}
}
| 1,808 | 26 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Seekable;
import com.google.common.base.Preconditions;
@InterfaceAudience.Private
public class CryptoStreamUtils {
private static final int MIN_BUFFER_SIZE = 512;
/** Forcibly free the direct buffer. */
public static void freeDB(ByteBuffer buffer) {
if (buffer instanceof sun.nio.ch.DirectBuffer) {
final sun.misc.Cleaner bufferCleaner =
((sun.nio.ch.DirectBuffer) buffer).cleaner();
bufferCleaner.clean();
}
}
/** Read crypto buffer size */
public static int getBufferSize(Configuration conf) {
return conf.getInt(HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY,
HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT);
}
/** AES/CTR/NoPadding is required */
public static void checkCodec(CryptoCodec codec) {
if (codec.getCipherSuite() != CipherSuite.AES_CTR_NOPADDING) {
throw new UnsupportedCodecException("AES/CTR/NoPadding is required");
}
}
/** Check and floor buffer size */
public static int checkBufferSize(CryptoCodec codec, int bufferSize) {
Preconditions.checkArgument(bufferSize >= MIN_BUFFER_SIZE,
"Minimum value of buffer size is " + MIN_BUFFER_SIZE + ".");
return bufferSize - bufferSize % codec.getCipherSuite()
.getAlgorithmBlockSize();
}
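  // Example: with a 16-byte block size, a configured buffer size of 8191 is
  // floored to 8176 (511 * 16) so the buffer always holds whole blocks.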
/**
   * If the input stream is {@link org.apache.hadoop.fs.Seekable}, return its
   * current position; otherwise return 0.
*/
public static long getInputStreamOffset(InputStream in) throws IOException {
if (in instanceof Seekable) {
return ((Seekable) in).getPos();
}
return 0;
}
}
| 2,842 | 35.448718 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface Encryptor {
/**
* Initialize the encryptor and the internal encryption context.
* @param key encryption key.
* @param iv encryption initialization vector
* @throws IOException if initialization fails
*/
public void init(byte[] key, byte[] iv) throws IOException;
/**
* Indicate whether the encryption context is reset.
* <p/>
* Certain modes, like CTR, require a different IV depending on the
* position in the stream. Generally, the encryptor maintains any necessary
* context for calculating the IV and counter so that no reinit is necessary
* during the encryption. Reinit before each operation is inefficient.
* @return boolean whether context is reset.
*/
public boolean isContextReset();
/**
* This presents a direct interface encrypting with direct ByteBuffers.
* <p/>
* This function does not always encrypt the entire buffer and may potentially
* need to be called multiple times to process an entire buffer. The object
* may hold the encryption context internally.
* <p/>
* Some implementations may require sufficient space in the destination
* buffer to encrypt the entire input buffer.
* <p/>
* Upon return, inBuffer.position() will be advanced by the number of bytes
* read and outBuffer.position() by bytes written. Implementations should
* not modify inBuffer.limit() and outBuffer.limit().
* <p/>
* @param inBuffer a direct {@link ByteBuffer} to read from. inBuffer may
* not be null and inBuffer.remaining() must be > 0
* @param outBuffer a direct {@link ByteBuffer} to write to. outBuffer may
* not be null and outBuffer.remaining() must be > 0
* @throws IOException if encryption fails
*/
public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
throws IOException;
}
| 2,914 | 39.486111 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.security.GeneralSecurityException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CanSetDropBehind;
import org.apache.hadoop.fs.Syncable;
import com.google.common.base.Preconditions;
/**
* CryptoOutputStream encrypts data. It is not thread-safe. AES CTR mode is
* required in order to ensure that the plain text and cipher text have a 1:1
 * mapping. The encryption is buffer based. The key points of the encryption
 * are (1) calculating the counter and (2) the padding from the stream
 * position.
* <p/>
* counter = base + pos/(algorithm blocksize);
* padding = pos%(algorithm blocksize);
* <p/>
* The underlying stream offset is maintained as state.
*
 * Note that while some of this class's methods are synchronized, this is just
 * to match the thread-safety behavior of DFSOutputStream. See HADOOP-11710.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class CryptoOutputStream extends FilterOutputStream implements
Syncable, CanSetDropBehind {
private final byte[] oneByteBuf = new byte[1];
private final CryptoCodec codec;
private final Encryptor encryptor;
private final int bufferSize;
/**
* Input data buffer. The data starts at inBuffer.position() and ends at
* inBuffer.limit().
*/
private ByteBuffer inBuffer;
/**
* Encrypted data buffer. The data starts at outBuffer.position() and ends at
* outBuffer.limit();
*/
private ByteBuffer outBuffer;
private long streamOffset = 0; // Underlying stream offset.
/**
* Padding = pos%(algorithm blocksize); Padding is put into {@link #inBuffer}
* before any other data goes in. The purpose of padding is to put input data
   * at the proper position.
*/
private byte padding;
private boolean closed;
private final byte[] key;
private final byte[] initIV;
private byte[] iv;
public CryptoOutputStream(OutputStream out, CryptoCodec codec,
int bufferSize, byte[] key, byte[] iv) throws IOException {
this(out, codec, bufferSize, key, iv, 0);
}
public CryptoOutputStream(OutputStream out, CryptoCodec codec,
int bufferSize, byte[] key, byte[] iv, long streamOffset)
throws IOException {
super(out);
CryptoStreamUtils.checkCodec(codec);
this.bufferSize = CryptoStreamUtils.checkBufferSize(codec, bufferSize);
this.codec = codec;
this.key = key.clone();
this.initIV = iv.clone();
this.iv = iv.clone();
inBuffer = ByteBuffer.allocateDirect(this.bufferSize);
outBuffer = ByteBuffer.allocateDirect(this.bufferSize);
this.streamOffset = streamOffset;
try {
encryptor = codec.createEncryptor();
} catch (GeneralSecurityException e) {
throw new IOException(e);
}
updateEncryptor();
}
public CryptoOutputStream(OutputStream out, CryptoCodec codec,
byte[] key, byte[] iv) throws IOException {
this(out, codec, key, iv, 0);
}
public CryptoOutputStream(OutputStream out, CryptoCodec codec,
byte[] key, byte[] iv, long streamOffset) throws IOException {
this(out, codec, CryptoStreamUtils.getBufferSize(codec.getConf()),
key, iv, streamOffset);
}
public OutputStream getWrappedStream() {
return out;
}
/**
* Encryption is buffer based.
* If there is enough room in {@link #inBuffer}, then write to this buffer.
* If {@link #inBuffer} is full, then do encryption and write data to the
* underlying stream.
* @param b the data.
* @param off the start offset in the data.
* @param len the number of bytes to write.
* @throws IOException
*/
@Override
public synchronized void write(byte[] b, int off, int len) throws IOException {
checkStream();
if (b == null) {
throw new NullPointerException();
} else if (off < 0 || len < 0 || off > b.length ||
len > b.length - off) {
throw new IndexOutOfBoundsException();
}
while (len > 0) {
final int remaining = inBuffer.remaining();
if (len < remaining) {
inBuffer.put(b, off, len);
len = 0;
} else {
inBuffer.put(b, off, remaining);
off += remaining;
len -= remaining;
encrypt();
}
}
}
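  // Illustrative example: with an 8 KB buffer, a 20 KB write triggers two
  // fill-and-encrypt passes; the remaining ~4 KB stays buffered until the
  // next write, flush() or close().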
/**
* Do the encryption, input is {@link #inBuffer} and output is
* {@link #outBuffer}.
*/
private void encrypt() throws IOException {
Preconditions.checkState(inBuffer.position() >= padding);
if (inBuffer.position() == padding) {
// There is no real data in the inBuffer.
return;
}
inBuffer.flip();
outBuffer.clear();
encryptor.encrypt(inBuffer, outBuffer);
inBuffer.clear();
outBuffer.flip();
if (padding > 0) {
/*
* The plain text and cipher text have a 1:1 mapping, they start at the
* same position.
*/
outBuffer.position(padding);
padding = 0;
}
final int len = outBuffer.remaining();
/*
     * If the underlying stream supports {@link ByteBuffer} writes in the
     * future, this copy should be revisited.
*/
final byte[] tmp = getTmpBuf();
outBuffer.get(tmp, 0, len);
out.write(tmp, 0, len);
streamOffset += len;
if (encryptor.isContextReset()) {
/*
* This code is generally not executed since the encryptor usually
* maintains encryption context (e.g. the counter) internally. However,
* some implementations can't maintain context so a re-init is necessary
* after each encryption call.
*/
updateEncryptor();
}
}
/** Update the {@link #encryptor}: calculate counter and {@link #padding}. */
private void updateEncryptor() throws IOException {
final long counter =
streamOffset / codec.getCipherSuite().getAlgorithmBlockSize();
padding =
(byte)(streamOffset % codec.getCipherSuite().getAlgorithmBlockSize());
inBuffer.position(padding); // Set proper position for input data.
codec.calculateIV(initIV, counter, iv);
encryptor.init(key, iv);
}
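  // Illustrative example: at streamOffset = 37 with the 16-byte AES block
  // size, counter = 37 / 16 = 2 and padding = 37 % 16 = 5, so five filler
  // bytes occupy the start of inBuffer and the cipher stream realigns with
  // block 2.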
private byte[] tmpBuf;
private byte[] getTmpBuf() {
if (tmpBuf == null) {
tmpBuf = new byte[bufferSize];
}
return tmpBuf;
}
@Override
public synchronized void close() throws IOException {
if (closed) {
return;
}
try {
super.close();
freeBuffers();
} finally {
closed = true;
}
}
/**
* To flush, we need to encrypt the data in the buffer and write to the
* underlying stream, then do the flush.
*/
@Override
public synchronized void flush() throws IOException {
checkStream();
encrypt();
super.flush();
}
@Override
public void write(int b) throws IOException {
oneByteBuf[0] = (byte)(b & 0xff);
write(oneByteBuf, 0, oneByteBuf.length);
}
private void checkStream() throws IOException {
if (closed) {
throw new IOException("Stream closed");
}
}
@Override
public void setDropBehind(Boolean dropCache) throws IOException,
UnsupportedOperationException {
try {
((CanSetDropBehind) out).setDropBehind(dropCache);
} catch (ClassCastException e) {
throw new UnsupportedOperationException("This stream does not " +
"support setting the drop-behind caching.");
}
}
@Override
@Deprecated
public void sync() throws IOException {
hflush();
}
@Override
public void hflush() throws IOException {
flush();
if (out instanceof Syncable) {
((Syncable)out).hflush();
}
}
@Override
public void hsync() throws IOException {
flush();
if (out instanceof Syncable) {
((Syncable)out).hsync();
}
}
/** Forcibly free the direct buffers. */
private void freeBuffers() {
CryptoStreamUtils.freeDB(inBuffer);
CryptoStreamUtils.freeDB(outBuffer);
}
}
| 8,825 | 29.122867 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.GeneralSecurityException;
import java.security.SecureRandom;
import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import com.google.common.base.Preconditions;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT;
/**
* Implement the AES-CTR crypto codec using JCE provider.
*/
@InterfaceAudience.Private
public class JceAesCtrCryptoCodec extends AesCtrCryptoCodec {
private static final Log LOG =
LogFactory.getLog(JceAesCtrCryptoCodec.class.getName());
private Configuration conf;
private String provider;
private SecureRandom random;
public JceAesCtrCryptoCodec() {
}
@Override
public Configuration getConf() {
return conf;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
provider = conf.get(HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY);
final String secureRandomAlg = conf.get(
HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY,
HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT);
try {
random = (provider != null) ?
SecureRandom.getInstance(secureRandomAlg, provider) :
SecureRandom.getInstance(secureRandomAlg);
} catch (GeneralSecurityException e) {
LOG.warn(e.getMessage());
random = new SecureRandom();
}
}
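  // Illustrative example: if HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY is set
  // (e.g. to "SunJCE"), both the ciphers and the SecureRandom come from that
  // provider; otherwise the JVM's normal provider lookup order applies.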
@Override
public Encryptor createEncryptor() throws GeneralSecurityException {
return new JceAesCtrCipher(Cipher.ENCRYPT_MODE, provider);
}
@Override
public Decryptor createDecryptor() throws GeneralSecurityException {
return new JceAesCtrCipher(Cipher.DECRYPT_MODE, provider);
}
@Override
public void generateSecureRandom(byte[] bytes) {
random.nextBytes(bytes);
}
private static class JceAesCtrCipher implements Encryptor, Decryptor {
private final Cipher cipher;
private final int mode;
private boolean contextReset = false;
public JceAesCtrCipher(int mode, String provider)
throws GeneralSecurityException {
this.mode = mode;
if (provider == null || provider.isEmpty()) {
cipher = Cipher.getInstance(SUITE.getName());
} else {
cipher = Cipher.getInstance(SUITE.getName(), provider);
}
}
@Override
public void init(byte[] key, byte[] iv) throws IOException {
Preconditions.checkNotNull(key);
Preconditions.checkNotNull(iv);
contextReset = false;
try {
cipher.init(mode, new SecretKeySpec(key, "AES"),
new IvParameterSpec(iv));
} catch (Exception e) {
throw new IOException(e);
}
}
/**
* AES-CTR will consume all of the input data. It requires enough space in
     * the destination buffer to encrypt the entire input buffer.
*/
@Override
public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
throws IOException {
process(inBuffer, outBuffer);
}
/**
* AES-CTR will consume all of the input data. It requires enough space in
     * the destination buffer to decrypt the entire input buffer.
*/
@Override
public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
throws IOException {
process(inBuffer, outBuffer);
}
private void process(ByteBuffer inBuffer, ByteBuffer outBuffer)
throws IOException {
try {
int inputSize = inBuffer.remaining();
// Cipher#update will maintain crypto context.
int n = cipher.update(inBuffer, outBuffer);
if (n < inputSize) {
/**
* Typically code will not get here. Cipher#update will consume all
* input data and put result in outBuffer.
* Cipher#doFinal will reset the crypto context.
*/
contextReset = true;
cipher.doFinal(inBuffer, outBuffer);
}
} catch (Exception e) {
throw new IOException(e);
}
}
@Override
public boolean isContextReset() {
return contextReset;
}
}
}
| 5,403 | 31.554217 | 118 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface Decryptor {
/**
   * Initialize the decryptor and the internal decryption context.
* @param key decryption key.
* @param iv decryption initialization vector
* @throws IOException if initialization fails
*/
public void init(byte[] key, byte[] iv) throws IOException;
/**
* Indicate whether the decryption context is reset.
* <p/>
* Certain modes, like CTR, require a different IV depending on the
* position in the stream. Generally, the decryptor maintains any necessary
* context for calculating the IV and counter so that no reinit is necessary
* during the decryption. Reinit before each operation is inefficient.
* @return boolean whether context is reset.
*/
public boolean isContextReset();
/**
* This presents a direct interface decrypting with direct ByteBuffers.
* <p/>
* This function does not always decrypt the entire buffer and may potentially
* need to be called multiple times to process an entire buffer. The object
* may hold the decryption context internally.
* <p/>
* Some implementations may require sufficient space in the destination
* buffer to decrypt the entire input buffer.
* <p/>
* Upon return, inBuffer.position() will be advanced by the number of bytes
* read and outBuffer.position() by bytes written. Implementations should
* not modify inBuffer.limit() and outBuffer.limit().
* <p/>
* @param inBuffer a direct {@link ByteBuffer} to read from. inBuffer may
* not be null and inBuffer.remaining() must be > 0
* @param outBuffer a direct {@link ByteBuffer} to write to. outBuffer may
* not be null and outBuffer.remaining() must be > 0
* @throws IOException if decryption fails
*/
public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
throws IOException;
}
| 2,929 | 39.136986 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.random;
import java.io.Closeable;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT;
/**
* A Random implementation that uses random bytes sourced from the
* operating system.
*/
@InterfaceAudience.Private
public class OsSecureRandom extends Random implements Closeable, Configurable {
public static final Log LOG = LogFactory.getLog(OsSecureRandom.class);
private static final long serialVersionUID = 6391500337172057900L;
private transient Configuration conf;
private final int RESERVOIR_LENGTH = 8192;
private String randomDevPath;
private transient FileInputStream stream;
private final byte[] reservoir = new byte[RESERVOIR_LENGTH];
private int pos = reservoir.length;
private void fillReservoir(int min) {
if (pos >= reservoir.length - min) {
try {
if (stream == null) {
stream = new FileInputStream(new File(randomDevPath));
}
IOUtils.readFully(stream, reservoir, 0, reservoir.length);
} catch (IOException e) {
throw new RuntimeException("failed to fill reservoir", e);
}
pos = 0;
}
}
public OsSecureRandom() {
}
@Override
synchronized public void setConf(Configuration conf) {
this.conf = conf;
this.randomDevPath = conf.get(
HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY,
HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT);
close();
}
@Override
synchronized public Configuration getConf() {
return conf;
}
@Override
synchronized public void nextBytes(byte[] bytes) {
int off = 0;
int n = 0;
while (off < bytes.length) {
fillReservoir(0);
n = Math.min(bytes.length - off, reservoir.length - pos);
System.arraycopy(reservoir, pos, bytes, off, n);
off += n;
pos += n;
}
}
@Override
synchronized protected int next(int nbits) {
fillReservoir(4);
int n = 0;
for (int i = 0; i < 4; i++) {
n = ((n << 8) | (reservoir[pos++] & 0xff));
}
return n & (0xffffffff >> (32 - nbits));
}
@Override
synchronized public void close() {
if (stream != null) {
IOUtils.cleanup(LOG, stream);
stream = null;
}
}
}
| 3,589 | 28.916667 | 120 |
java
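A minimal sketch of how OsSecureRandom is wired up: the device path is read in setConf(), so setConf() must be called before the first nextBytes(). The /dev/urandom value is an assumption about the platform, not something the class requires.

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.random.OsSecureRandom;

public class OsSecureRandomSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Point the 8 KB reservoir at an explicit device (platform assumption).
    conf.set(HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY,
        "/dev/urandom");
    OsSecureRandom random = new OsSecureRandom();
    random.setConf(conf);  // required: the device path is resolved here
    byte[] iv = new byte[16];
    random.nextBytes(iv);  // filled from the OS entropy source
    random.close();        // releases the underlying FileInputStream
  }
}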
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.random;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.base.Preconditions;
/**
* OpenSSL secure random using JNI.
* This implementation is thread-safe.
* <p/>
*
 * If using an Intel chipset with RDRAND, the high-performance hardware
 * random number generator will be used and it's much faster than
 * {@link java.security.SecureRandom}. If RDRAND is unavailable, the default
 * OpenSSL secure random generator will be used. It's still faster than
 * the Java implementation and can generate strong random bytes.
* <p/>
* @see https://wiki.openssl.org/index.php/Random_Numbers
* @see http://en.wikipedia.org/wiki/RdRand
*/
@InterfaceAudience.Private
public class OpensslSecureRandom extends Random {
private static final long serialVersionUID = -7828193502768789584L;
private static final Log LOG =
LogFactory.getLog(OpensslSecureRandom.class.getName());
/** If native SecureRandom unavailable, use java SecureRandom */
private java.security.SecureRandom fallback = null;
private static boolean nativeEnabled = false;
static {
if (NativeCodeLoader.isNativeCodeLoaded() &&
NativeCodeLoader.buildSupportsOpenssl()) {
try {
initSR();
nativeEnabled = true;
} catch (Throwable t) {
LOG.error("Failed to load Openssl SecureRandom", t);
}
}
}
public static boolean isNativeCodeLoaded() {
return nativeEnabled;
}
public OpensslSecureRandom() {
if (!nativeEnabled) {
fallback = new java.security.SecureRandom();
}
}
/**
* Generates a user-specified number of random bytes.
* It's thread-safe.
*
* @param bytes the array to be filled in with random bytes.
*/
@Override
public void nextBytes(byte[] bytes) {
if (!nativeEnabled || !nextRandBytes(bytes)) {
fallback.nextBytes(bytes);
}
}
@Override
public void setSeed(long seed) {
// Self-seeding.
}
/**
* Generates an integer containing the user-specified number of
* random bits (right justified, with leading zeros).
*
* @param numBits number of random bits to be generated, where
* 0 <= <code>numBits</code> <= 32.
*
* @return int an <code>int</code> containing the user-specified number
* of random bits (right justified, with leading zeros).
*/
@Override
final protected int next(int numBits) {
Preconditions.checkArgument(numBits >= 0 && numBits <= 32);
int numBytes = (numBits + 7) / 8;
byte b[] = new byte[numBytes];
int next = 0;
nextBytes(b);
for (int i = 0; i < numBytes; i++) {
next = (next << 8) + (b[i] & 0xFF);
}
return next >>> (numBytes * 8 - numBits);
}
private native static void initSR();
private native boolean nextRandBytes(byte[] bytes);
}
| 3,778 | 30.491667 | 75 |
java
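A small usage sketch of the fallback behavior described in the Javadoc above: the class can be used unconditionally, and it silently falls back to java.security.SecureRandom when the native OpenSSL code did not load.

import org.apache.hadoop.crypto.random.OpensslSecureRandom;

public class OpensslSecureRandomSketch {
  public static void main(String[] args) {
    // Safe whether or not the native library loaded; the constructor
    // installs a java.security.SecureRandom fallback when it did not.
    OpensslSecureRandom random = new OpensslSecureRandom();
    System.out.println("native RNG in use: "
        + OpensslSecureRandom.isNativeCodeLoaded());
    byte[] key = new byte[32];
    random.nextBytes(key);  // thread-safe bulk generation
    // Methods inherited from java.util.Random route through next(int).
    int sample = random.nextInt(100);
    System.out.println("sample value: " + sample);
  }
}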
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/UserProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
/**
* A KeyProvider factory for UGIs. It uses the credentials object associated
* with the current user to find keys. This provider is created using a
* URI of "user:///".
*/
@InterfaceAudience.Private
public class UserProvider extends KeyProvider {
public static final String SCHEME_NAME = "user";
private final UserGroupInformation user;
private final Credentials credentials;
private final Map<String, Metadata> cache = new HashMap<String, Metadata>();
private UserProvider(Configuration conf) throws IOException {
super(conf);
user = UserGroupInformation.getCurrentUser();
credentials = user.getCredentials();
}
@Override
public boolean isTransient() {
return true;
}
@Override
public synchronized KeyVersion getKeyVersion(String versionName)
throws IOException {
byte[] bytes = credentials.getSecretKey(new Text(versionName));
if (bytes == null) {
return null;
}
return new KeyVersion(getBaseName(versionName), versionName, bytes);
}
@Override
public synchronized Metadata getMetadata(String name) throws IOException {
if (cache.containsKey(name)) {
return cache.get(name);
}
byte[] serialized = credentials.getSecretKey(new Text(name));
if (serialized == null) {
return null;
}
Metadata result = new Metadata(serialized);
cache.put(name, result);
return result;
}
@Override
public synchronized KeyVersion createKey(String name, byte[] material,
Options options) throws IOException {
Text nameT = new Text(name);
if (credentials.getSecretKey(nameT) != null) {
throw new IOException("Key " + name + " already exists in " + this);
}
if (options.getBitLength() != 8 * material.length) {
throw new IOException("Wrong key length. Required " +
options.getBitLength() + ", but got " + (8 * material.length));
}
Metadata meta = new Metadata(options.getCipher(), options.getBitLength(),
options.getDescription(), options.getAttributes(), new Date(), 1);
cache.put(name, meta);
String versionName = buildVersionName(name, 0);
credentials.addSecretKey(nameT, meta.serialize());
credentials.addSecretKey(new Text(versionName), material);
return new KeyVersion(name, versionName, material);
}
@Override
public synchronized void deleteKey(String name) throws IOException {
Metadata meta = getMetadata(name);
if (meta == null) {
throw new IOException("Key " + name + " does not exist in " + this);
}
for(int v=0; v < meta.getVersions(); ++v) {
credentials.removeSecretKey(new Text(buildVersionName(name, v)));
}
credentials.removeSecretKey(new Text(name));
cache.remove(name);
}
@Override
public synchronized KeyVersion rollNewVersion(String name,
byte[] material) throws IOException {
Metadata meta = getMetadata(name);
if (meta == null) {
throw new IOException("Key " + name + " not found");
}
if (meta.getBitLength() != 8 * material.length) {
throw new IOException("Wrong key length. Required " +
meta.getBitLength() + ", but got " + (8 * material.length));
}
int nextVersion = meta.addVersion();
credentials.addSecretKey(new Text(name), meta.serialize());
String versionName = buildVersionName(name, nextVersion);
credentials.addSecretKey(new Text(versionName), material);
return new KeyVersion(name, versionName, material);
}
@Override
public String toString() {
return SCHEME_NAME + ":///";
}
@Override
public synchronized void flush() {
user.addCredentials(credentials);
}
public static class Factory extends KeyProviderFactory {
@Override
public KeyProvider createProvider(URI providerName,
Configuration conf) throws IOException {
if (SCHEME_NAME.equals(providerName.getScheme())) {
return new UserProvider(conf);
}
return null;
}
}
@Override
public synchronized List<String> getKeys() throws IOException {
List<String> list = new ArrayList<String>();
List<Text> keys = credentials.getAllSecretKeys();
for (Text key : keys) {
if (key.find("@") == -1) {
list.add(key.toString());
}
}
return list;
}
@Override
public synchronized List<KeyVersion> getKeyVersions(String name) throws IOException {
List<KeyVersion> list = new ArrayList<KeyVersion>();
Metadata km = getMetadata(name);
if (km != null) {
int latestVersion = km.getVersions();
for (int i = 0; i < latestVersion; i++) {
KeyVersion v = getKeyVersion(buildVersionName(name, i));
if (v != null) {
list.add(v);
}
}
}
return list;
}
}
| 6,092 | 32.295082 | 87 |
java
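A sketch of how the user:/// scheme resolves to this provider through the factory, and how a created key lands in (and is flushed back to) the current user's credentials. The key name is a placeholder, and KeyProvider.Options(conf) with its 128-bit default length is assumed from the KeyProvider API.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class UserProviderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The nested Factory above matches on the "user" scheme.
    KeyProvider provider = KeyProviderFactory.get(new URI("user:///"), conf);
    // The default bit length is 128, so exactly 16 bytes of material.
    byte[] material = new byte[16];
    KeyProvider.KeyVersion kv = provider.createKey("demo-key", material,
        new KeyProvider.Options(conf));
    System.out.println("created " + kv.getVersionName());
    provider.flush();  // pushes the credentials back onto the current UGI
  }
}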
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.GeneralSecurityException;
import java.security.SecureRandom;
import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.Decryptor;
import org.apache.hadoop.crypto.Encryptor;
/**
* A KeyProvider with Cryptographic Extensions specifically for generating
* and decrypting encrypted encryption keys.
*
*/
@InterfaceAudience.Private
public class KeyProviderCryptoExtension extends
KeyProviderExtension<KeyProviderCryptoExtension.CryptoExtension> {
/**
* Designates an encrypted encryption key, or EEK.
*/
public static final String EEK = "EEK";
/**
* Designates a decrypted encrypted encryption key, that is, an encryption key
* (EK).
*/
public static final String EK = "EK";
/**
* An encrypted encryption key (EEK) and related information. An EEK must be
* decrypted using the key's encryption key before it can be used.
*/
public static class EncryptedKeyVersion {
private String encryptionKeyName;
private String encryptionKeyVersionName;
private byte[] encryptedKeyIv;
private KeyVersion encryptedKeyVersion;
/**
* Create a new EncryptedKeyVersion.
*
* @param keyName Name of the encryption key used to
* encrypt the encrypted key.
* @param encryptionKeyVersionName Version name of the encryption key used
* to encrypt the encrypted key.
* @param encryptedKeyIv Initialization vector of the encrypted
* key. The IV of the encryption key used to
* encrypt the encrypted key is derived from
* this IV.
* @param encryptedKeyVersion The encrypted encryption key version.
*/
protected EncryptedKeyVersion(String keyName,
String encryptionKeyVersionName, byte[] encryptedKeyIv,
KeyVersion encryptedKeyVersion) {
this.encryptionKeyName = keyName;
this.encryptionKeyVersionName = encryptionKeyVersionName;
this.encryptedKeyIv = encryptedKeyIv;
this.encryptedKeyVersion = encryptedKeyVersion;
}
/**
* Factory method to create a new EncryptedKeyVersion that can then be
* passed into {@link #decryptEncryptedKey}. Note that the fields of the
* returned EncryptedKeyVersion will only partially be populated; it is not
* necessarily suitable for operations besides decryption.
*
     * @param keyName Key name of the encryption key used to encrypt the
     *                 encrypted key.
* @param encryptionKeyVersionName Version name of the encryption key used
* to encrypt the encrypted key.
* @param encryptedKeyIv Initialization vector of the encrypted
* key. The IV of the encryption key used to
* encrypt the encrypted key is derived from
* this IV.
* @param encryptedKeyMaterial Key material of the encrypted key.
* @return EncryptedKeyVersion suitable for decryption.
*/
public static EncryptedKeyVersion createForDecryption(String keyName,
String encryptionKeyVersionName, byte[] encryptedKeyIv,
byte[] encryptedKeyMaterial) {
KeyVersion encryptedKeyVersion = new KeyVersion(null, EEK,
encryptedKeyMaterial);
return new EncryptedKeyVersion(keyName, encryptionKeyVersionName,
encryptedKeyIv, encryptedKeyVersion);
}
/**
* @return Name of the encryption key used to encrypt the encrypted key.
*/
public String getEncryptionKeyName() {
return encryptionKeyName;
}
/**
* @return Version name of the encryption key used to encrypt the encrypted
* key.
*/
public String getEncryptionKeyVersionName() {
return encryptionKeyVersionName;
}
/**
* @return Initialization vector of the encrypted key. The IV of the
* encryption key used to encrypt the encrypted key is derived from this
* IV.
*/
public byte[] getEncryptedKeyIv() {
return encryptedKeyIv;
}
/**
* @return The encrypted encryption key version.
*/
public KeyVersion getEncryptedKeyVersion() {
return encryptedKeyVersion;
}
/**
* Derive the initialization vector (IV) for the encryption key from the IV
* of the encrypted key. This derived IV is used with the encryption key to
* decrypt the encrypted key.
* <p/>
* The alternative to this is using the same IV for both the encryption key
* and the encrypted key. Even a simple symmetric transformation like this
* improves security by avoiding IV re-use. IVs will also be fairly unique
* among different EEKs.
*
* @param encryptedKeyIV of the encrypted key (i.e. {@link
* #getEncryptedKeyIv()})
* @return IV for the encryption key
*/
protected static byte[] deriveIV(byte[] encryptedKeyIV) {
byte[] rIv = new byte[encryptedKeyIV.length];
// Do a simple XOR transformation to flip all the bits
for (int i = 0; i < encryptedKeyIV.length; i++) {
rIv[i] = (byte) (encryptedKeyIV[i] ^ 0xff);
}
return rIv;
}
}
/**
* CryptoExtension is a type of Extension that exposes methods to generate
* EncryptedKeys and to decrypt the same.
*/
public interface CryptoExtension extends KeyProviderExtension.Extension {
/**
     * Calls to this method allow the underlying KeyProvider to warm up any
     * implementation-specific caches used to store the Encrypted Keys.
* @param keyNames Array of Key Names
*/
public void warmUpEncryptedKeys(String... keyNames)
throws IOException;
/**
* Drains the Queue for the provided key.
*
* @param keyName the key to drain the Queue for
*/
public void drain(String keyName);
/**
     * Generates new key material and encrypts it using the latest key version
     * of the named key and a random initialization vector. The generated key
     * material is of the same length as the <code>KeyVersion</code> material
     * of the latest key version of the key and is encrypted using the same
     * cipher.
* <p/>
* NOTE: The generated key is not stored by the <code>KeyProvider</code>
*
     * @param encryptionKeyName
     *          Name of the key whose latest KeyVersion will be used to
     *          encrypt the generated key material.
* @return EncryptedKeyVersion with the generated key material, the version
* name is 'EEK' (for Encrypted Encryption Key)
* @throws IOException
* thrown if the key material could not be generated
* @throws GeneralSecurityException
* thrown if the key material could not be encrypted because of a
* cryptographic issue.
*/
public EncryptedKeyVersion generateEncryptedKey(
String encryptionKeyName) throws IOException,
GeneralSecurityException;
/**
     * Decrypts an encrypted byte[] key material using the given key version
     * name and initialization vector.
*
* @param encryptedKeyVersion
* contains keyVersionName and IV to decrypt the encrypted key
* material
* @return a KeyVersion with the decrypted key material, the version name is
* 'EK' (For Encryption Key)
* @throws IOException
* thrown if the key material could not be decrypted
* @throws GeneralSecurityException
* thrown if the key material could not be decrypted because of a
* cryptographic issue.
*/
public KeyVersion decryptEncryptedKey(
EncryptedKeyVersion encryptedKeyVersion) throws IOException,
GeneralSecurityException;
}
private static class DefaultCryptoExtension implements CryptoExtension {
private final KeyProvider keyProvider;
private static final ThreadLocal<SecureRandom> RANDOM =
new ThreadLocal<SecureRandom>() {
@Override
protected SecureRandom initialValue() {
return new SecureRandom();
}
};
private DefaultCryptoExtension(KeyProvider keyProvider) {
this.keyProvider = keyProvider;
}
@Override
public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
throws IOException, GeneralSecurityException {
// Fetch the encryption key
KeyVersion encryptionKey = keyProvider.getCurrentKey(encryptionKeyName);
Preconditions.checkNotNull(encryptionKey,
"No KeyVersion exists for key '%s' ", encryptionKeyName);
// Generate random bytes for new key and IV
CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf());
final byte[] newKey = new byte[encryptionKey.getMaterial().length];
cc.generateSecureRandom(newKey);
final byte[] iv = new byte[cc.getCipherSuite().getAlgorithmBlockSize()];
cc.generateSecureRandom(iv);
// Encryption key IV is derived from new key's IV
final byte[] encryptionIV = EncryptedKeyVersion.deriveIV(iv);
Encryptor encryptor = cc.createEncryptor();
encryptor.init(encryptionKey.getMaterial(), encryptionIV);
int keyLen = newKey.length;
ByteBuffer bbIn = ByteBuffer.allocateDirect(keyLen);
ByteBuffer bbOut = ByteBuffer.allocateDirect(keyLen);
bbIn.put(newKey);
bbIn.flip();
encryptor.encrypt(bbIn, bbOut);
bbOut.flip();
byte[] encryptedKey = new byte[keyLen];
bbOut.get(encryptedKey);
return new EncryptedKeyVersion(encryptionKeyName,
encryptionKey.getVersionName(), iv,
new KeyVersion(encryptionKey.getName(), EEK, encryptedKey));
}
@Override
public KeyVersion decryptEncryptedKey(
EncryptedKeyVersion encryptedKeyVersion) throws IOException,
GeneralSecurityException {
// Fetch the encryption key material
final String encryptionKeyVersionName =
encryptedKeyVersion.getEncryptionKeyVersionName();
final KeyVersion encryptionKey =
keyProvider.getKeyVersion(encryptionKeyVersionName);
Preconditions.checkNotNull(encryptionKey,
"KeyVersion name '%s' does not exist", encryptionKeyVersionName);
Preconditions.checkArgument(
encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
.equals(KeyProviderCryptoExtension.EEK),
"encryptedKey version name must be '%s', is '%s'",
KeyProviderCryptoExtension.EEK,
encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
);
// Encryption key IV is determined from encrypted key's IV
final byte[] encryptionIV =
EncryptedKeyVersion.deriveIV(encryptedKeyVersion.getEncryptedKeyIv());
CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf());
Decryptor decryptor = cc.createDecryptor();
decryptor.init(encryptionKey.getMaterial(), encryptionIV);
final KeyVersion encryptedKV =
encryptedKeyVersion.getEncryptedKeyVersion();
int keyLen = encryptedKV.getMaterial().length;
ByteBuffer bbIn = ByteBuffer.allocateDirect(keyLen);
ByteBuffer bbOut = ByteBuffer.allocateDirect(keyLen);
bbIn.put(encryptedKV.getMaterial());
bbIn.flip();
decryptor.decrypt(bbIn, bbOut);
bbOut.flip();
byte[] decryptedKey = new byte[keyLen];
bbOut.get(decryptedKey);
return new KeyVersion(encryptionKey.getName(), EK, decryptedKey);
}
@Override
public void warmUpEncryptedKeys(String... keyNames)
throws IOException {
// NO-OP since the default version does not cache any keys
}
@Override
public void drain(String keyName) {
// NO-OP since the default version does not cache any keys
}
}
/**
   * This constructor is to be used by subclasses that provide
   * delegating/proxying functionality to the {@link KeyProviderCryptoExtension}
* @param keyProvider
* @param extension
*/
protected KeyProviderCryptoExtension(KeyProvider keyProvider,
CryptoExtension extension) {
super(keyProvider, extension);
}
/**
   * Notifies the underlying CryptoExtension implementation to warm up any
   * implementation-specific caches for the specified KeyVersions
   * @param keyNames Array of key names
*/
public void warmUpEncryptedKeys(String... keyNames)
throws IOException {
getExtension().warmUpEncryptedKeys(keyNames);
}
/**
   * Generates new key material and encrypts it using the latest key version of
   * the named key and a random initialization vector. The generated key
   * material is of the same
* length as the <code>KeyVersion</code> material and is encrypted using the
* same cipher.
* <p/>
* NOTE: The generated key is not stored by the <code>KeyProvider</code>
*
   * @param encryptionKeyName Name of the key whose latest KeyVersion will be
   *          used to encrypt the generated key material.
* @return EncryptedKeyVersion with the generated key material, the version
* name is 'EEK' (for Encrypted Encryption Key)
* @throws IOException thrown if the key material could not be generated
* @throws GeneralSecurityException thrown if the key material could not be
* encrypted because of a cryptographic issue.
*/
public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
throws IOException,
GeneralSecurityException {
return getExtension().generateEncryptedKey(encryptionKeyName);
}
/**
   * Decrypts an encrypted byte[] key material using the given key version
   * name and initialization vector.
*
* @param encryptedKey contains keyVersionName and IV to decrypt the encrypted
* key material
* @return a KeyVersion with the decrypted key material, the version name is
* 'EK' (For Encryption Key)
* @throws IOException thrown if the key material could not be decrypted
* @throws GeneralSecurityException thrown if the key material could not be
* decrypted because of a cryptographic issue.
*/
public KeyVersion decryptEncryptedKey(EncryptedKeyVersion encryptedKey)
throws IOException, GeneralSecurityException {
return getExtension().decryptEncryptedKey(encryptedKey);
}
/**
* Creates a <code>KeyProviderCryptoExtension</code> using a given
* {@link KeyProvider}.
* <p/>
* If the given <code>KeyProvider</code> implements the
* {@link CryptoExtension} interface the <code>KeyProvider</code> itself
* will provide the extension functionality, otherwise a default extension
* implementation will be used.
*
* @param keyProvider <code>KeyProvider</code> to use to create the
* <code>KeyProviderCryptoExtension</code> extension.
* @return a <code>KeyProviderCryptoExtension</code> instance using the
* given <code>KeyProvider</code>.
*/
public static KeyProviderCryptoExtension createKeyProviderCryptoExtension(
KeyProvider keyProvider) {
CryptoExtension cryptoExtension = (keyProvider instanceof CryptoExtension)
? (CryptoExtension) keyProvider
: new DefaultCryptoExtension(keyProvider);
return new KeyProviderCryptoExtension(keyProvider, cryptoExtension);
}
@Override
public void close() throws IOException {
if (getKeyProvider() != null) {
getKeyProvider().close();
}
}
}
| 16,581 | 38.387173 | 80 |
java
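A sketch of the EEK round trip this extension exists for: one side generates a transient key and hands out only its encrypted form; the holder of the encryption key later decrypts it back into usable material. The "masterKey" name is an assumption and must already exist in the wrapped provider.

import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;

public class EekRoundTripSketch {
  // Assumes a key named "masterKey" was already created in the provider.
  static byte[] roundTrip(KeyProvider provider) throws Exception {
    KeyProviderCryptoExtension kpce = KeyProviderCryptoExtension
        .createKeyProviderCryptoExtension(provider);
    // Generate a new random key, returned only in encrypted (EEK) form.
    EncryptedKeyVersion eek = kpce.generateEncryptedKey("masterKey");
    // Decrypt the EEK back into plain key material (EK).
    KeyProvider.KeyVersion ek = kpce.decryptEncryptedKey(eek);
    return ek.getMaterial();
  }
}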
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import java.io.IOException;
/**
* A KeyProvider extension with the ability to add a renewer's Delegation
* Tokens to the provided Credentials.
*/
public class KeyProviderDelegationTokenExtension extends
KeyProviderExtension
<KeyProviderDelegationTokenExtension.DelegationTokenExtension> {
private static DelegationTokenExtension DEFAULT_EXTENSION =
new DefaultDelegationTokenExtension();
/**
   * DelegationTokenExtension is a type of Extension that exposes the methods
   * needed to work with Delegation Tokens.
*/
public interface DelegationTokenExtension extends
KeyProviderExtension.Extension {
/**
     * The implementer of this class will take a renewer and add all
     * delegation tokens associated with the renewer to the
     * <code>Credentials</code> object if they are not already present.
* @param renewer the user allowed to renew the delegation tokens
* @param credentials cache in which to add new delegation tokens
* @return list of new delegation tokens
     * @throws IOException thrown if an IO error occurs.
*/
public Token<?>[] addDelegationTokens(final String renewer,
Credentials credentials) throws IOException;
}
/**
* Default implementation of {@link DelegationTokenExtension} that
* implements the method as a no-op.
*/
private static class DefaultDelegationTokenExtension implements
DelegationTokenExtension {
@Override
public Token<?>[] addDelegationTokens(String renewer,
Credentials credentials) {
return null;
}
}
private KeyProviderDelegationTokenExtension(KeyProvider keyProvider,
DelegationTokenExtension extensions) {
super(keyProvider, extensions);
}
/**
* Passes the renewer and Credentials object to the underlying
* {@link DelegationTokenExtension}
* @param renewer the user allowed to renew the delegation tokens
* @param credentials cache in which to add new delegation tokens
* @return list of new delegation tokens
   * @throws IOException thrown if an IO error occurs.
*/
public Token<?>[] addDelegationTokens(final String renewer,
Credentials credentials) throws IOException {
return getExtension().addDelegationTokens(renewer, credentials);
}
/**
* Creates a <code>KeyProviderDelegationTokenExtension</code> using a given
* {@link KeyProvider}.
* <p/>
* If the given <code>KeyProvider</code> implements the
* {@link DelegationTokenExtension} interface the <code>KeyProvider</code>
* itself will provide the extension functionality, otherwise a default
* extension implementation will be used.
*
* @param keyProvider <code>KeyProvider</code> to use to create the
* <code>KeyProviderDelegationTokenExtension</code> extension.
* @return a <code>KeyProviderDelegationTokenExtension</code> instance
* using the given <code>KeyProvider</code>.
*/
public static KeyProviderDelegationTokenExtension
createKeyProviderDelegationTokenExtension(KeyProvider keyProvider) {
DelegationTokenExtension delTokExtension =
(keyProvider instanceof DelegationTokenExtension) ?
(DelegationTokenExtension) keyProvider :
DEFAULT_EXTENSION;
return new KeyProviderDelegationTokenExtension(
keyProvider, delTokExtension);
}
}
| 4,289 | 35.982759 | 77 |
java
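A sketch of the intended call pattern: wrap any KeyProvider, then collect delegation tokens into a Credentials cache before shipping work elsewhere. Since the default extension is a no-op that returns null, callers should treat a null result as "nothing to add".

import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class DelegationTokenSketch {
  static void collectTokens(KeyProvider provider, String renewer,
      Credentials credentials) throws Exception {
    KeyProviderDelegationTokenExtension ext =
        KeyProviderDelegationTokenExtension
            .createKeyProviderDelegationTokenExtension(provider);
    Token<?>[] added = ext.addDelegationTokens(renewer, credentials);
    if (added != null) {  // null when the no-op default extension is used
      System.out.println("obtained " + added.length + " token(s)");
    }
  }
}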
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.ServiceLoader;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
 * A factory to create a list of KeyProviders based on the path given in a
* Configuration. It uses a service loader interface to find the available
* KeyProviders and create them based on the list of URIs.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public abstract class KeyProviderFactory {
public static final String KEY_PROVIDER_PATH =
"hadoop.security.key.provider.path";
public abstract KeyProvider createProvider(URI providerName,
Configuration conf
) throws IOException;
private static final ServiceLoader<KeyProviderFactory> serviceLoader =
ServiceLoader.load(KeyProviderFactory.class,
KeyProviderFactory.class.getClassLoader());
// Iterate through the serviceLoader to avoid lazy loading.
// Lazy loading would require synchronization in concurrent use cases.
static {
Iterator<KeyProviderFactory> iterServices = serviceLoader.iterator();
while (iterServices.hasNext()) {
iterServices.next();
}
}
public static List<KeyProvider> getProviders(Configuration conf
) throws IOException {
List<KeyProvider> result = new ArrayList<KeyProvider>();
for(String path: conf.getStringCollection(KEY_PROVIDER_PATH)) {
try {
URI uri = new URI(path);
KeyProvider kp = get(uri, conf);
if (kp != null) {
result.add(kp);
} else {
throw new IOException("No KeyProviderFactory for " + uri + " in " +
KEY_PROVIDER_PATH);
}
} catch (URISyntaxException error) {
throw new IOException("Bad configuration of " + KEY_PROVIDER_PATH +
" at " + path, error);
}
}
return result;
}
/**
* Create a KeyProvider based on a provided URI.
*
* @param uri key provider URI
* @param conf configuration to initialize the key provider
* @return the key provider for the specified URI, or <code>NULL</code> if
* a provider for the specified URI scheme could not be found.
* @throws IOException thrown if the provider failed to initialize.
*/
public static KeyProvider get(URI uri, Configuration conf)
throws IOException {
KeyProvider kp = null;
for (KeyProviderFactory factory : serviceLoader) {
kp = factory.createProvider(uri, conf);
if (kp != null) {
break;
}
}
return kp;
}
}
| 3,692 | 34.509615 | 77 |
java
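A sketch of configuration-driven provider resolution, mirroring what getProviders() does above. The two URIs are illustrative values for the hadoop.security.key.provider.path property; each scheme is matched against the factories found by the ServiceLoader.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class KeyProviderFactorySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Comma-separated provider URIs; the "user" and "jceks" schemes are
    // registered by the providers shown elsewhere in this package.
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
        "user:///,jceks://file/tmp/demo.jceks");
    List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
    for (KeyProvider provider : providers) {
      System.out.println("resolved provider: " + provider);
    }
  }
}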
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderExtension.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import java.util.List;
/**
 * This is a utility class used to extend the functionality of KeyProvider.
 * It takes a KeyProvider and an Extension, and implements all the required
 * methods of KeyProvider by delegating to the provided KeyProvider.
*/
public abstract class KeyProviderExtension
<E extends KeyProviderExtension.Extension> extends KeyProvider {
/**
   * A marker interface for KeyProviderExtension subclasses to implement.
*/
public static interface Extension {
}
private KeyProvider keyProvider;
private E extension;
public KeyProviderExtension(KeyProvider keyProvider, E extensions) {
super(keyProvider.getConf());
this.keyProvider = keyProvider;
this.extension = extensions;
}
protected E getExtension() {
return extension;
}
protected KeyProvider getKeyProvider() {
return keyProvider;
}
@Override
public boolean isTransient() {
return keyProvider.isTransient();
}
@Override
public Metadata[] getKeysMetadata(String... names) throws IOException {
return keyProvider.getKeysMetadata(names);
}
@Override
public KeyVersion getCurrentKey(String name) throws IOException {
return keyProvider.getCurrentKey(name);
}
@Override
public KeyVersion createKey(String name, Options options)
throws NoSuchAlgorithmException, IOException {
return keyProvider.createKey(name, options);
}
@Override
public KeyVersion rollNewVersion(String name)
throws NoSuchAlgorithmException, IOException {
return keyProvider.rollNewVersion(name);
}
@Override
public KeyVersion getKeyVersion(String versionName) throws IOException {
return keyProvider.getKeyVersion(versionName);
}
@Override
public List<String> getKeys() throws IOException {
return keyProvider.getKeys();
}
@Override
public List<KeyVersion> getKeyVersions(String name) throws IOException {
return keyProvider.getKeyVersions(name);
}
@Override
public Metadata getMetadata(String name) throws IOException {
return keyProvider.getMetadata(name);
}
@Override
public KeyVersion createKey(String name, byte[] material, Options options)
throws IOException {
return keyProvider.createKey(name, material, options);
}
@Override
public void deleteKey(String name) throws IOException {
keyProvider.deleteKey(name);
}
@Override
public KeyVersion rollNewVersion(String name, byte[] material)
throws IOException {
return keyProvider.rollNewVersion(name, material);
}
@Override
public void flush() throws IOException {
keyProvider.flush();
}
@Override
public String toString() {
return getClass().getSimpleName() + ": " + keyProvider.toString();
}
}
| 3,647 | 27.061538 | 80 |
java
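A sketch of how this delegation pattern is meant to be specialized: a subclass pins a concrete Extension type and adds methods that forward to it, while every KeyProvider method stays delegated to the wrapped provider. The auditing names are invented for illustration.

import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderExtension;

// Hypothetical extension that records key accesses for auditing.
public class AuditingKeyProviderExtension extends
    KeyProviderExtension<AuditingKeyProviderExtension.AuditExtension> {

  public interface AuditExtension extends KeyProviderExtension.Extension {
    void recordAccess(String keyName);
  }

  public AuditingKeyProviderExtension(KeyProvider keyProvider,
      AuditExtension extension) {
    super(keyProvider, extension);
  }

  // New functionality sits beside the inherited, delegated KeyProvider API.
  public KeyVersion auditedGetCurrentKey(String name)
      throws java.io.IOException {
    getExtension().recordAccess(name);
    return getCurrentKey(name);  // delegated to the wrapped provider
  }
}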
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import com.google.common.base.Preconditions;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import javax.crypto.spec.SecretKeySpec;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.net.URI;
import java.net.URL;
import java.security.Key;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.UnrecoverableKeyException;
import java.security.cert.CertificateException;
import java.util.ArrayList;
import java.util.Date;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* KeyProvider based on Java's KeyStore file format. The file may be stored in
* any Hadoop FileSystem using the following name mangling:
 * jceks://hdfs@nn1.example.com/my/keys.jceks -> hdfs://nn1.example.com/my/keys.jceks
 * jceks://file/home/owen/keys.jceks -> file:///home/owen/keys.jceks
* <p/>
* If the <code>HADOOP_KEYSTORE_PASSWORD</code> environment variable is set,
* its value is used as the password for the keystore.
* <p/>
* If the <code>HADOOP_KEYSTORE_PASSWORD</code> environment variable is not set,
 * the password for the keystore is read from the file specified in the
* {@link #KEYSTORE_PASSWORD_FILE_KEY} configuration property. The password file
* is looked up in Hadoop's configuration directory via the classpath.
* <p/>
* <b>NOTE:</b> Make sure the password in the password file does not have an
* ENTER at the end, else it won't be valid for the Java KeyStore.
* <p/>
 * If neither the environment variable nor the property is set, the password
 * used is 'none'.
* <p/>
* It is expected for encrypted InputFormats and OutputFormats to copy the keys
* from the original provider into the job's Credentials object, which is
* accessed via the UserProvider. Therefore, this provider won't be used by
* MapReduce tasks.
*/
@InterfaceAudience.Private
public class JavaKeyStoreProvider extends KeyProvider {
private static final String KEY_METADATA = "KeyMetadata";
private static Logger LOG =
LoggerFactory.getLogger(JavaKeyStoreProvider.class);
public static final String SCHEME_NAME = "jceks";
public static final String KEYSTORE_PASSWORD_FILE_KEY =
"hadoop.security.keystore.java-keystore-provider.password-file";
public static final String KEYSTORE_PASSWORD_ENV_VAR =
"HADOOP_KEYSTORE_PASSWORD";
public static final char[] KEYSTORE_PASSWORD_DEFAULT = "none".toCharArray();
private final URI uri;
private final Path path;
private final FileSystem fs;
private final FsPermission permissions;
private final KeyStore keyStore;
private char[] password;
private boolean changed = false;
private Lock readLock;
private Lock writeLock;
private final Map<String, Metadata> cache = new HashMap<String, Metadata>();
@VisibleForTesting
JavaKeyStoreProvider(JavaKeyStoreProvider other) {
super(new Configuration());
uri = other.uri;
path = other.path;
fs = other.fs;
permissions = other.permissions;
keyStore = other.keyStore;
password = other.password;
changed = other.changed;
readLock = other.readLock;
writeLock = other.writeLock;
}
private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException {
super(conf);
this.uri = uri;
path = ProviderUtils.unnestUri(uri);
fs = path.getFileSystem(conf);
    // Get the password from the user's environment variable if present,
    // otherwise from the password file named in the conf
if (System.getenv().containsKey(KEYSTORE_PASSWORD_ENV_VAR)) {
password = System.getenv(KEYSTORE_PASSWORD_ENV_VAR).toCharArray();
}
if (password == null) {
String pwFile = conf.get(KEYSTORE_PASSWORD_FILE_KEY);
if (pwFile != null) {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL pwdFile = cl.getResource(pwFile);
if (pwdFile == null) {
// Provided Password file does not exist
throw new IOException("Password file does not exists");
}
try (InputStream is = pwdFile.openStream()) {
password = IOUtils.toString(is).trim().toCharArray();
}
}
}
if (password == null) {
password = KEYSTORE_PASSWORD_DEFAULT;
}
try {
Path oldPath = constructOldPath(path);
Path newPath = constructNewPath(path);
keyStore = KeyStore.getInstance(SCHEME_NAME);
FsPermission perm = null;
if (fs.exists(path)) {
// flush did not proceed to completion
// _NEW should not exist
if (fs.exists(newPath)) {
throw new IOException(
String.format("Keystore not loaded due to some inconsistency "
+ "('%s' and '%s' should not exist together)!!", path, newPath));
}
perm = tryLoadFromPath(path, oldPath);
} else {
perm = tryLoadIncompleteFlush(oldPath, newPath);
}
// Need to save off permissions in case we need to
// rewrite the keystore in flush()
permissions = perm;
} catch (KeyStoreException e) {
throw new IOException("Can't create keystore", e);
} catch (NoSuchAlgorithmException e) {
throw new IOException("Can't load keystore " + path, e);
} catch (CertificateException e) {
throw new IOException("Can't load keystore " + path, e);
}
ReadWriteLock lock = new ReentrantReadWriteLock(true);
readLock = lock.readLock();
writeLock = lock.writeLock();
}
/**
   * Try loading from the user-specified path, else load from the backup
   * path if the exception is not due to a bad/wrong password
* @param path Actual path to load from
* @param backupPath Backup path (_OLD)
* @return The permissions of the loaded file
* @throws NoSuchAlgorithmException
* @throws CertificateException
* @throws IOException
*/
private FsPermission tryLoadFromPath(Path path, Path backupPath)
throws NoSuchAlgorithmException, CertificateException,
IOException {
FsPermission perm = null;
try {
perm = loadFromPath(path, password);
      // Remove _OLD if it exists
if (fs.exists(backupPath)) {
fs.delete(backupPath, true);
}
LOG.debug("KeyStore loaded successfully !!");
} catch (IOException ioe) {
      // If the file is corrupted for some reason other than a
      // wrong password, try the _OLD file if it exists
if (!isBadorWrongPassword(ioe)) {
perm = loadFromPath(backupPath, password);
// Rename CURRENT to CORRUPTED
renameOrFail(path, new Path(path.toString() + "_CORRUPTED_"
+ System.currentTimeMillis()));
renameOrFail(backupPath, path);
if (LOG.isDebugEnabled()) {
LOG.debug(String.format(
"KeyStore loaded successfully from '%s' since '%s'"
+ "was corrupted !!", backupPath, path));
}
} else {
throw ioe;
}
}
return perm;
}
/**
   * The KeyStore might have gone down during a flush, in which case either the
   * _NEW or _OLD file might exist. This method tries to load the KeyStore
* from one of these intermediate files.
* @param oldPath the _OLD file created during flush
* @param newPath the _NEW file created during flush
* @return The permissions of the loaded file
* @throws IOException
* @throws NoSuchAlgorithmException
* @throws CertificateException
*/
private FsPermission tryLoadIncompleteFlush(Path oldPath, Path newPath)
throws IOException, NoSuchAlgorithmException, CertificateException {
FsPermission perm = null;
// Check if _NEW exists (in case flush had finished writing but not
// completed the re-naming)
if (fs.exists(newPath)) {
perm = loadAndReturnPerm(newPath, oldPath);
}
    // Try loading from _OLD (an earlier flush might not have completed
    // writing)
if ((perm == null) && fs.exists(oldPath)) {
perm = loadAndReturnPerm(oldPath, newPath);
}
    // If not loaded yet, we are
    // required to create an empty keystore. *sigh*
if (perm == null) {
keyStore.load(null, password);
LOG.debug("KeyStore initialized anew successfully !!");
perm = new FsPermission("700");
}
return perm;
}
private FsPermission loadAndReturnPerm(Path pathToLoad, Path pathToDelete)
throws NoSuchAlgorithmException, CertificateException,
IOException {
FsPermission perm = null;
try {
perm = loadFromPath(pathToLoad, password);
renameOrFail(pathToLoad, path);
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
pathToLoad));
}
if (fs.exists(pathToDelete)) {
fs.delete(pathToDelete, true);
}
} catch (IOException e) {
      // Check for a password issue: we don't want to trash the file
      // due to a wrong password
if (isBadorWrongPassword(e)) {
throw e;
}
}
return perm;
}
private boolean isBadorWrongPassword(IOException ioe) {
    // As per the documentation this is supposed to be the way to figure
    // out whether the password was correct
if (ioe.getCause() instanceof UnrecoverableKeyException) {
return true;
}
    // Unfortunately that doesn't seem to work.
    // Workaround:
if ((ioe.getCause() == null)
&& (ioe.getMessage() != null)
&& ((ioe.getMessage().contains("Keystore was tampered")) || (ioe
.getMessage().contains("password was incorrect")))) {
return true;
}
return false;
}
private FsPermission loadFromPath(Path p, char[] password)
throws IOException, NoSuchAlgorithmException, CertificateException {
try (FSDataInputStream in = fs.open(p)) {
FileStatus s = fs.getFileStatus(p);
keyStore.load(in, password);
return s.getPermission();
}
}
private Path constructNewPath(Path path) {
Path newPath = new Path(path.toString() + "_NEW");
return newPath;
}
private Path constructOldPath(Path path) {
Path oldPath = new Path(path.toString() + "_OLD");
return oldPath;
}
@Override
public KeyVersion getKeyVersion(String versionName) throws IOException {
readLock.lock();
try {
SecretKeySpec key = null;
try {
if (!keyStore.containsAlias(versionName)) {
return null;
}
key = (SecretKeySpec) keyStore.getKey(versionName, password);
} catch (KeyStoreException e) {
throw new IOException("Can't get key " + versionName + " from " +
path, e);
} catch (NoSuchAlgorithmException e) {
throw new IOException("Can't get algorithm for key " + key + " from " +
path, e);
} catch (UnrecoverableKeyException e) {
throw new IOException("Can't recover key " + key + " from " + path, e);
}
return new KeyVersion(getBaseName(versionName), versionName, key.getEncoded());
} finally {
readLock.unlock();
}
}
@Override
public List<String> getKeys() throws IOException {
readLock.lock();
try {
ArrayList<String> list = new ArrayList<String>();
String alias = null;
try {
Enumeration<String> e = keyStore.aliases();
while (e.hasMoreElements()) {
alias = e.nextElement();
// only include the metadata key names in the list of names
if (!alias.contains("@")) {
list.add(alias);
}
}
} catch (KeyStoreException e) {
throw new IOException("Can't get key " + alias + " from " + path, e);
}
return list;
} finally {
readLock.unlock();
}
}
@Override
public List<KeyVersion> getKeyVersions(String name) throws IOException {
readLock.lock();
try {
List<KeyVersion> list = new ArrayList<KeyVersion>();
Metadata km = getMetadata(name);
if (km != null) {
int latestVersion = km.getVersions();
KeyVersion v = null;
String versionName = null;
for (int i = 0; i < latestVersion; i++) {
versionName = buildVersionName(name, i);
v = getKeyVersion(versionName);
if (v != null) {
list.add(v);
}
}
}
return list;
} finally {
readLock.unlock();
}
}
@Override
public Metadata getMetadata(String name) throws IOException {
readLock.lock();
try {
if (cache.containsKey(name)) {
return cache.get(name);
}
try {
if (!keyStore.containsAlias(name)) {
return null;
}
Metadata meta = ((KeyMetadata) keyStore.getKey(name, password)).metadata;
cache.put(name, meta);
return meta;
} catch (ClassCastException e) {
throw new IOException("Can't cast key for " + name + " in keystore " +
path + " to a KeyMetadata. Key may have been added using " +
" keytool or some other non-Hadoop method.", e);
} catch (KeyStoreException e) {
throw new IOException("Can't get metadata for " + name +
" from keystore " + path, e);
} catch (NoSuchAlgorithmException e) {
throw new IOException("Can't get algorithm for " + name +
" from keystore " + path, e);
} catch (UnrecoverableKeyException e) {
throw new IOException("Can't recover key for " + name +
" from keystore " + path, e);
}
} finally {
readLock.unlock();
}
}
@Override
public KeyVersion createKey(String name, byte[] material,
Options options) throws IOException {
Preconditions.checkArgument(name.equals(StringUtils.toLowerCase(name)),
"Uppercase key names are unsupported: %s", name);
writeLock.lock();
try {
try {
if (keyStore.containsAlias(name) || cache.containsKey(name)) {
throw new IOException("Key " + name + " already exists in " + this);
}
} catch (KeyStoreException e) {
throw new IOException("Problem looking up key " + name + " in " + this,
e);
}
Metadata meta = new Metadata(options.getCipher(), options.getBitLength(),
options.getDescription(), options.getAttributes(), new Date(), 1);
if (options.getBitLength() != 8 * material.length) {
throw new IOException("Wrong key length. Required " +
options.getBitLength() + ", but got " + (8 * material.length));
}
cache.put(name, meta);
String versionName = buildVersionName(name, 0);
return innerSetKeyVersion(name, versionName, material, meta.getCipher());
} finally {
writeLock.unlock();
}
}
@Override
public void deleteKey(String name) throws IOException {
writeLock.lock();
try {
Metadata meta = getMetadata(name);
if (meta == null) {
throw new IOException("Key " + name + " does not exist in " + this);
}
for(int v=0; v < meta.getVersions(); ++v) {
String versionName = buildVersionName(name, v);
try {
if (keyStore.containsAlias(versionName)) {
keyStore.deleteEntry(versionName);
}
} catch (KeyStoreException e) {
throw new IOException("Problem removing " + versionName + " from " +
this, e);
}
}
try {
if (keyStore.containsAlias(name)) {
keyStore.deleteEntry(name);
}
} catch (KeyStoreException e) {
throw new IOException("Problem removing " + name + " from " + this, e);
}
cache.remove(name);
changed = true;
} finally {
writeLock.unlock();
}
}
KeyVersion innerSetKeyVersion(String name, String versionName, byte[] material,
String cipher) throws IOException {
try {
keyStore.setKeyEntry(versionName, new SecretKeySpec(material, cipher),
password, null);
} catch (KeyStoreException e) {
throw new IOException("Can't store key " + versionName + " in " + this,
e);
}
changed = true;
return new KeyVersion(name, versionName, material);
}
@Override
public KeyVersion rollNewVersion(String name,
byte[] material) throws IOException {
writeLock.lock();
try {
Metadata meta = getMetadata(name);
if (meta == null) {
throw new IOException("Key " + name + " not found");
}
if (meta.getBitLength() != 8 * material.length) {
throw new IOException("Wrong key length. Required " +
meta.getBitLength() + ", but got " + (8 * material.length));
}
int nextVersion = meta.addVersion();
String versionName = buildVersionName(name, nextVersion);
return innerSetKeyVersion(name, versionName, material, meta.getCipher());
} finally {
writeLock.unlock();
}
}
@Override
public void flush() throws IOException {
Path newPath = constructNewPath(path);
Path oldPath = constructOldPath(path);
Path resetPath = path;
writeLock.lock();
try {
if (!changed) {
return;
}
// Might exist if a backup has been restored etc.
if (fs.exists(newPath)) {
renameOrFail(newPath, new Path(newPath.toString()
+ "_ORPHANED_" + System.currentTimeMillis()));
}
if (fs.exists(oldPath)) {
renameOrFail(oldPath, new Path(oldPath.toString()
+ "_ORPHANED_" + System.currentTimeMillis()));
}
// put all of the updates into the keystore
for(Map.Entry<String, Metadata> entry: cache.entrySet()) {
try {
keyStore.setKeyEntry(entry.getKey(), new KeyMetadata(entry.getValue()),
password, null);
} catch (KeyStoreException e) {
throw new IOException("Can't set metadata key " + entry.getKey(),e );
}
}
// Save old File first
boolean fileExisted = backupToOld(oldPath);
if (fileExisted) {
resetPath = oldPath;
}
      // Write out the keystore to the _NEW path first:
try {
writeToNew(newPath);
} catch (IOException ioe) {
        // Rename _OLD back to current and throw the exception
revertFromOld(oldPath, fileExisted);
resetPath = path;
throw ioe;
}
// Rename _NEW to CURRENT and delete _OLD
cleanupNewAndOld(newPath, oldPath);
changed = false;
} catch (IOException ioe) {
resetKeyStoreState(resetPath);
throw ioe;
} finally {
writeLock.unlock();
}
}
private void resetKeyStoreState(Path path) {
LOG.debug("Could not flush Keystore.."
+ "attempting to reset to previous state !!");
// 1) flush cache
cache.clear();
// 2) load keyStore from previous path
try {
loadFromPath(path, password);
LOG.debug("KeyStore resetting to previously flushed state !!");
} catch (Exception e) {
LOG.debug("Could not reset Keystore to previous state", e);
}
}
private void cleanupNewAndOld(Path newPath, Path oldPath) throws IOException {
// Rename _NEW to CURRENT
renameOrFail(newPath, path);
// Delete _OLD
if (fs.exists(oldPath)) {
fs.delete(oldPath, true);
}
}
protected void writeToNew(Path newPath) throws IOException {
    try (FSDataOutputStream out =
        FileSystem.create(fs, newPath, permissions)) {
keyStore.store(out, password);
} catch (KeyStoreException e) {
throw new IOException("Can't store keystore " + this, e);
} catch (NoSuchAlgorithmException e) {
throw new IOException(
"No such algorithm storing keystore " + this, e);
} catch (CertificateException e) {
throw new IOException(
"Certificate exception storing keystore " + this, e);
}
}
protected boolean backupToOld(Path oldPath)
throws IOException {
boolean fileExisted = false;
if (fs.exists(path)) {
renameOrFail(path, oldPath);
fileExisted = true;
}
return fileExisted;
}
private void revertFromOld(Path oldPath, boolean fileExisted)
throws IOException {
if (fileExisted) {
renameOrFail(oldPath, path);
}
}
private void renameOrFail(Path src, Path dest)
throws IOException {
if (!fs.rename(src, dest)) {
throw new IOException("Rename unsuccessful : "
+ String.format("'%s' to '%s'", src, dest));
}
}
@Override
public String toString() {
return uri.toString();
}
/**
* The factory to create JksProviders, which is used by the ServiceLoader.
*/
public static class Factory extends KeyProviderFactory {
@Override
public KeyProvider createProvider(URI providerName,
Configuration conf) throws IOException {
if (SCHEME_NAME.equals(providerName.getScheme())) {
return new JavaKeyStoreProvider(providerName, conf);
}
return null;
}
}
/**
* An adapter between a KeyStore Key and our Metadata. This is used to store
   * the metadata in a KeyStore even though it isn't really a key.
*/
public static class KeyMetadata implements Key, Serializable {
private Metadata metadata;
private final static long serialVersionUID = 8405872419967874451L;
private KeyMetadata(Metadata meta) {
this.metadata = meta;
}
@Override
public String getAlgorithm() {
return metadata.getCipher();
}
@Override
public String getFormat() {
return KEY_METADATA;
}
@Override
public byte[] getEncoded() {
return new byte[0];
}
private void writeObject(ObjectOutputStream out) throws IOException {
byte[] serialized = metadata.serialize();
out.writeInt(serialized.length);
out.write(serialized);
}
private void readObject(ObjectInputStream in
) throws IOException, ClassNotFoundException {
byte[] buf = new byte[in.readInt()];
in.readFully(buf);
metadata = new Metadata(buf);
}
}
}
| 23,701 | 32.572238 | 85 |
java
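An end-to-end sketch of the jceks scheme: create a key in a keystore-backed provider and persist it with flush(), which performs the _NEW/_OLD rename sequence implemented above. The path and key name are placeholders; the keystore password is resolved from the environment variable or password file as the class Javadoc describes, and KeyProvider.Options(conf) is assumed from the KeyProvider API.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class JavaKeyStoreProviderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    KeyProvider provider = KeyProviderFactory.get(
        new URI("jceks://file/tmp/demo.jceks"), conf);  // placeholder path
    byte[] material = new byte[16];  // 128-bit default key length
    // Key names must be lowercase for this provider.
    provider.createKey("demo-key", material, new KeyProvider.Options(conf));
    provider.flush();  // writes _NEW, renames it into place, drops _OLD
  }
}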
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.security.NoSuchAlgorithmException;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonWriter;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import javax.crypto.KeyGenerator;
/**
* A provider of secret key material for Hadoop applications. Provides an
* abstraction to separate key storage from users of encryption. It
* is intended to support getting or storing keys in a variety of ways,
* including third party bindings.
 * <p/>
* <code>KeyProvider</code> implementations must be thread safe.
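 * <p/>
 * A minimal usage sketch (illustrative; assumes a provider has been
 * configured and is obtained via {@code KeyProviderFactory}):
 * <pre>
 *   KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
 *   KeyProvider.Options opts = KeyProvider.options(conf)
 *       .setDescription("example key");
 *   KeyVersion first = provider.createKey("example.key", opts);
 *   KeyVersion current = provider.getCurrentKey("example.key");
 *   provider.flush();
 * </pre>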
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public abstract class KeyProvider {
public static final String DEFAULT_CIPHER_NAME =
"hadoop.security.key.default.cipher";
public static final String DEFAULT_CIPHER = "AES/CTR/NoPadding";
public static final String DEFAULT_BITLENGTH_NAME =
"hadoop.security.key.default.bitlength";
public static final int DEFAULT_BITLENGTH = 128;
private final Configuration conf;
/**
* The combination of both the key version name and the key material.
*/
public static class KeyVersion {
private final String name;
private final String versionName;
private final byte[] material;
protected KeyVersion(String name, String versionName,
byte[] material) {
this.name = name;
this.versionName = versionName;
this.material = material;
}
public String getName() {
return name;
}
public String getVersionName() {
return versionName;
}
public byte[] getMaterial() {
return material;
}
public String toString() {
StringBuilder buf = new StringBuilder();
buf.append("key(");
buf.append(versionName);
buf.append(")=");
if (material == null) {
buf.append("null");
} else {
for(byte b: material) {
buf.append(' ');
int right = b & 0xff;
if (right < 0x10) {
buf.append('0');
}
buf.append(Integer.toHexString(right));
}
}
return buf.toString();
}
}
/**
* Key metadata that is associated with the key.
*/
public static class Metadata {
private final static String CIPHER_FIELD = "cipher";
private final static String BIT_LENGTH_FIELD = "bitLength";
private final static String CREATED_FIELD = "created";
private final static String DESCRIPTION_FIELD = "description";
private final static String VERSIONS_FIELD = "versions";
private final static String ATTRIBUTES_FIELD = "attributes";
private final String cipher;
private final int bitLength;
private final String description;
private final Date created;
private int versions;
private Map<String, String> attributes;
protected Metadata(String cipher, int bitLength, String description,
Map<String, String> attributes, Date created, int versions) {
this.cipher = cipher;
this.bitLength = bitLength;
this.description = description;
this.attributes = (attributes == null || attributes.isEmpty())
? null : attributes;
this.created = created;
this.versions = versions;
}
public String toString() {
final StringBuilder metaSB = new StringBuilder();
metaSB.append("cipher: ").append(cipher).append(", ");
metaSB.append("length: ").append(bitLength).append(", ");
metaSB.append("description: ").append(description).append(", ");
metaSB.append("created: ").append(created).append(", ");
metaSB.append("version: ").append(versions).append(", ");
metaSB.append("attributes: ");
if ((attributes != null) && !attributes.isEmpty()) {
for (Map.Entry<String, String> attribute : attributes.entrySet()) {
metaSB.append("[");
metaSB.append(attribute.getKey());
metaSB.append("=");
metaSB.append(attribute.getValue());
metaSB.append("], ");
}
        metaSB.deleteCharAt(metaSB.length() - 2); // remove the trailing ','
} else {
metaSB.append("null");
}
return metaSB.toString();
}
public String getDescription() {
return description;
}
public Date getCreated() {
return created;
}
public String getCipher() {
return cipher;
}
@SuppressWarnings("unchecked")
public Map<String, String> getAttributes() {
return (attributes == null) ? Collections.EMPTY_MAP : attributes;
}
/**
* Get the algorithm from the cipher.
* @return the algorithm name
*/
public String getAlgorithm() {
int slash = cipher.indexOf('/');
      if (slash == -1) {
return cipher;
} else {
return cipher.substring(0, slash);
}
}
public int getBitLength() {
return bitLength;
}
public int getVersions() {
return versions;
}
protected int addVersion() {
return versions++;
}
/**
* Serialize the metadata to a set of bytes.
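     * <p/>
     * The serialized form is a JSON object containing the fields written
     * below, for example (illustrative values, optional fields omitted):
     * <pre>
     *   {"cipher":"AES/CTR/NoPadding","bitLength":128,
     *    "created":1400000000000,"versions":1}
     * </pre>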
* @return the serialized bytes
* @throws IOException
*/
protected byte[] serialize() throws IOException {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
JsonWriter writer = new JsonWriter(
new OutputStreamWriter(buffer, Charsets.UTF_8));
try {
writer.beginObject();
if (cipher != null) {
writer.name(CIPHER_FIELD).value(cipher);
}
if (bitLength != 0) {
writer.name(BIT_LENGTH_FIELD).value(bitLength);
}
if (created != null) {
writer.name(CREATED_FIELD).value(created.getTime());
}
if (description != null) {
writer.name(DESCRIPTION_FIELD).value(description);
}
if (attributes != null && attributes.size() > 0) {
writer.name(ATTRIBUTES_FIELD).beginObject();
for (Map.Entry<String, String> attribute : attributes.entrySet()) {
writer.name(attribute.getKey()).value(attribute.getValue());
}
writer.endObject();
}
writer.name(VERSIONS_FIELD).value(versions);
writer.endObject();
writer.flush();
} finally {
writer.close();
}
return buffer.toByteArray();
}
/**
* Deserialize a new metadata object from a set of bytes.
* @param bytes the serialized metadata
* @throws IOException
*/
protected Metadata(byte[] bytes) throws IOException {
String cipher = null;
int bitLength = 0;
Date created = null;
int versions = 0;
String description = null;
Map<String, String> attributes = null;
JsonReader reader = new JsonReader(new InputStreamReader
(new ByteArrayInputStream(bytes), Charsets.UTF_8));
try {
reader.beginObject();
while (reader.hasNext()) {
String field = reader.nextName();
if (CIPHER_FIELD.equals(field)) {
cipher = reader.nextString();
} else if (BIT_LENGTH_FIELD.equals(field)) {
bitLength = reader.nextInt();
} else if (CREATED_FIELD.equals(field)) {
created = new Date(reader.nextLong());
} else if (VERSIONS_FIELD.equals(field)) {
versions = reader.nextInt();
} else if (DESCRIPTION_FIELD.equals(field)) {
description = reader.nextString();
} else if (ATTRIBUTES_FIELD.equalsIgnoreCase(field)) {
reader.beginObject();
attributes = new HashMap<String, String>();
while (reader.hasNext()) {
attributes.put(reader.nextName(), reader.nextString());
}
reader.endObject();
}
}
reader.endObject();
} finally {
reader.close();
}
this.cipher = cipher;
this.bitLength = bitLength;
this.created = created;
this.description = description;
this.attributes = attributes;
this.versions = versions;
}
}
/**
* Options when creating key objects.
*/
public static class Options {
private String cipher;
private int bitLength;
private String description;
private Map<String, String> attributes;
public Options(Configuration conf) {
cipher = conf.get(DEFAULT_CIPHER_NAME, DEFAULT_CIPHER);
bitLength = conf.getInt(DEFAULT_BITLENGTH_NAME, DEFAULT_BITLENGTH);
}
public Options setCipher(String cipher) {
this.cipher = cipher;
return this;
}
public Options setBitLength(int bitLength) {
this.bitLength = bitLength;
return this;
}
public Options setDescription(String description) {
this.description = description;
return this;
}
public Options setAttributes(Map<String, String> attributes) {
if (attributes != null) {
if (attributes.containsKey(null)) {
throw new IllegalArgumentException("attributes cannot have a NULL key");
}
this.attributes = new HashMap<String, String>(attributes);
}
return this;
}
public String getCipher() {
return cipher;
}
public int getBitLength() {
return bitLength;
}
public String getDescription() {
return description;
}
@SuppressWarnings("unchecked")
public Map<String, String> getAttributes() {
return (attributes == null) ? Collections.EMPTY_MAP : attributes;
}
@Override
public String toString() {
return "Options{" +
"cipher='" + cipher + '\'' +
", bitLength=" + bitLength +
", description='" + description + '\'' +
", attributes=" + attributes +
'}';
}
}
/**
* Constructor.
*
* @param conf configuration for the provider
*/
public KeyProvider(Configuration conf) {
this.conf = new Configuration(conf);
}
/**
* Return the provider configuration.
*
* @return the provider configuration
*/
public Configuration getConf() {
return conf;
}
/**
* A helper function to create an options object.
* @param conf the configuration to use
* @return a new options object
*/
public static Options options(Configuration conf) {
return new Options(conf);
}
/**
* Indicates whether this provider represents a store
   * that is intended for transient use, such as the UserProvider.
   * These providers are generally used to provide access to
   * keying material rather than for long-term storage.
* @return true if transient, false otherwise
*/
public boolean isTransient() {
return false;
}
/**
* Get the key material for a specific version of the key. This method is used
* when decrypting data.
* @param versionName the name of a specific version of the key
* @return the key material
* @throws IOException
*/
public abstract KeyVersion getKeyVersion(String versionName
) throws IOException;
/**
* Get the key names for all keys.
* @return the list of key names
* @throws IOException
*/
public abstract List<String> getKeys() throws IOException;
/**
* Get key metadata in bulk.
   * @param names the names of the keys to get
   * @return the metadata for each key, in the same order as the names
   * @throws IOException
*/
public Metadata[] getKeysMetadata(String... names) throws IOException {
Metadata[] result = new Metadata[names.length];
for (int i=0; i < names.length; ++i) {
result[i] = getMetadata(names[i]);
}
return result;
}
/**
* Get the key material for all versions of a specific key name.
   * @return the list of key versions
* @throws IOException
*/
public abstract List<KeyVersion> getKeyVersions(String name) throws IOException;
/**
* Get the current version of the key, which should be used for encrypting new
* data.
* @param name the base name of the key
   * @return the current version of the key, or null if the
   * key doesn't exist
* @throws IOException
*/
public KeyVersion getCurrentKey(String name) throws IOException {
Metadata meta = getMetadata(name);
if (meta == null) {
return null;
}
return getKeyVersion(buildVersionName(name, meta.getVersions() - 1));
}
/**
* Get metadata about the key.
* @param name the basename of the key
* @return the key's metadata or null if the key doesn't exist
* @throws IOException
*/
public abstract Metadata getMetadata(String name) throws IOException;
/**
* Create a new key. The given key must not already exist.
* @param name the base name of the key
* @param material the key material for the first version of the key.
* @param options the options for the new key.
   * @return the first version of the key.
* @throws IOException
*/
public abstract KeyVersion createKey(String name, byte[] material,
Options options) throws IOException;
/**
* Get the algorithm from the cipher.
*
* @return the algorithm name
*/
private String getAlgorithm(String cipher) {
int slash = cipher.indexOf('/');
if (slash == -1) {
return cipher;
} else {
return cipher.substring(0, slash);
}
}
/**
* Generates a key material.
*
* @param size length of the key.
* @param algorithm algorithm to use for generating the key.
* @return the generated key.
* @throws NoSuchAlgorithmException
*/
protected byte[] generateKey(int size, String algorithm)
throws NoSuchAlgorithmException {
algorithm = getAlgorithm(algorithm);
KeyGenerator keyGenerator = KeyGenerator.getInstance(algorithm);
keyGenerator.init(size);
byte[] key = keyGenerator.generateKey().getEncoded();
return key;
}
/**
* Create a new key generating the material for it.
* The given key must not already exist.
* <p/>
* This implementation generates the key material and calls the
* {@link #createKey(String, byte[], Options)} method.
*
* @param name the base name of the key
* @param options the options for the new key.
   * @return the first version of the key.
* @throws IOException
* @throws NoSuchAlgorithmException
*/
public KeyVersion createKey(String name, Options options)
throws NoSuchAlgorithmException, IOException {
byte[] material = generateKey(options.getBitLength(), options.getCipher());
return createKey(name, material, options);
}
/**
* Delete the given key.
* @param name the name of the key to delete
* @throws IOException
*/
public abstract void deleteKey(String name) throws IOException;
/**
* Roll a new version of the given key.
* @param name the basename of the key
* @param material the new key material
   * @return the new version of the key
* @throws IOException
*/
public abstract KeyVersion rollNewVersion(String name,
byte[] material
) throws IOException;
/**
* Can be used by implementing classes to close any resources
* that require closing
*/
public void close() throws IOException {
// NOP
}
/**
* Roll a new version of the given key generating the material for it.
* <p/>
* This implementation generates the key material and calls the
* {@link #rollNewVersion(String, byte[])} method.
*
* @param name the basename of the key
   * @return the new version of the key
* @throws IOException
*/
public KeyVersion rollNewVersion(String name) throws NoSuchAlgorithmException,
IOException {
Metadata meta = getMetadata(name);
byte[] material = generateKey(meta.getBitLength(), meta.getCipher());
return rollNewVersion(name, material);
}
/**
* Ensures that any changes to the keys are written to persistent store.
* @throws IOException
*/
public abstract void flush() throws IOException;
/**
   * Split the versionName into a base name. Converts "/aaa/bbb@3" to
   * "/aaa/bbb".
* @param versionName the version name to split
* @return the base name of the key
* @throws IOException
*/
public static String getBaseName(String versionName) throws IOException {
int div = versionName.lastIndexOf('@');
if (div == -1) {
throw new IOException("No version in key path " + versionName);
}
return versionName.substring(0, div);
}
/**
* Build a version string from a basename and version number. Converts
* "/aaa/bbb" and 3 to "/aaa/bbb@3".
* @param name the basename of the key
* @param version the version of the key
* @return the versionName of the key.
*/
protected static String buildVersionName(String name, int version) {
return name + "@" + version;
}
/**
* Find the provider with the given key.
* @param providerList the list of providers
* @param keyName the key name we are looking for
* @return the KeyProvider that has the key
*/
public static KeyProvider findProvider(List<KeyProvider> providerList,
String keyName) throws IOException {
for(KeyProvider provider: providerList) {
if (provider.getMetadata(keyName) != null) {
return provider;
}
}
throw new IOException("Can't find KeyProvider for key " + keyName);
}
}
| 18,760 | 29.705401 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.IOException;
import java.io.PrintStream;
import java.security.InvalidParameterException;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.crypto.key.KeyProvider.Metadata;
import org.apache.hadoop.crypto.key.KeyProvider.Options;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* This program is the CLI utility for the KeyProvider facilities in Hadoop.
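 * <p/>
 * Example invocations (illustrative):
 * <pre>
 *   % hadoop key create mykey -size 256 -attr owner=alice
 *   % hadoop key list -metadata
 *   % hadoop key roll mykey
 *   % hadoop key delete mykey -f
 * </pre>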
*/
public class KeyShell extends Configured implements Tool {
final static private String USAGE_PREFIX = "Usage: hadoop key " +
"[generic options]\n";
final static private String COMMANDS =
" [-help]\n" +
" [" + CreateCommand.USAGE + "]\n" +
" [" + RollCommand.USAGE + "]\n" +
" [" + DeleteCommand.USAGE + "]\n" +
" [" + ListCommand.USAGE + "]\n";
private static final String LIST_METADATA = "keyShell.list.metadata";
private boolean interactive = true;
private Command command = null;
/** allows stdout to be captured if necessary */
public PrintStream out = System.out;
/** allows stderr to be captured if necessary */
public PrintStream err = System.err;
private boolean userSuppliedProvider = false;
/**
* Primary entry point for the KeyShell; called via main().
*
* @param args Command line arguments.
* @return 0 on success and 1 on failure. This value is passed back to
* the unix shell, so we must follow shell return code conventions:
   * the return code is an unsigned character; 0 means success and
   * small positive integers mean failure.
* @throws Exception
*/
@Override
public int run(String[] args) throws Exception {
int exitCode = 0;
try {
exitCode = init(args);
if (exitCode != 0) {
return exitCode;
}
if (command.validate()) {
command.execute();
} else {
exitCode = 1;
}
} catch (Exception e) {
e.printStackTrace(err);
return 1;
}
return exitCode;
}
/**
* Parse the command line arguments and initialize the data
* <pre>
* % hadoop key create keyName [-size size] [-cipher algorithm]
* [-provider providerPath]
* % hadoop key roll keyName [-provider providerPath]
* % hadoop key list [-provider providerPath]
* % hadoop key delete keyName [-provider providerPath] [-i]
* </pre>
* @param args Command line arguments.
* @return 0 on success, 1 on failure.
* @throws IOException
*/
private int init(String[] args) throws IOException {
final Options options = KeyProvider.options(getConf());
final Map<String, String> attributes = new HashMap<String, String>();
for (int i = 0; i < args.length; i++) { // parse command line
boolean moreTokens = (i < args.length - 1);
if (args[i].equals("create")) {
String keyName = "-help";
if (moreTokens) {
keyName = args[++i];
}
command = new CreateCommand(keyName, options);
if ("-help".equals(keyName)) {
printKeyShellUsage();
return 1;
}
} else if (args[i].equals("delete")) {
String keyName = "-help";
if (moreTokens) {
keyName = args[++i];
}
command = new DeleteCommand(keyName);
if ("-help".equals(keyName)) {
printKeyShellUsage();
return 1;
}
} else if (args[i].equals("roll")) {
String keyName = "-help";
if (moreTokens) {
keyName = args[++i];
}
command = new RollCommand(keyName);
if ("-help".equals(keyName)) {
printKeyShellUsage();
return 1;
}
} else if ("list".equals(args[i])) {
command = new ListCommand();
} else if ("-size".equals(args[i]) && moreTokens) {
options.setBitLength(Integer.parseInt(args[++i]));
} else if ("-cipher".equals(args[i]) && moreTokens) {
options.setCipher(args[++i]);
} else if ("-description".equals(args[i]) && moreTokens) {
options.setDescription(args[++i]);
} else if ("-attr".equals(args[i]) && moreTokens) {
        final String attrval[] = args[++i].split("=", 2);
        final String attr = attrval[0].trim();
        // Guard against a missing '=' before indexing into attrval
        final String val = (attrval.length > 1) ? attrval[1].trim() : "";
        if (attr.isEmpty() || val.isEmpty()) {
out.println("\nAttributes must be in attribute=value form, " +
"or quoted\nlike \"attribute = value\"\n");
printKeyShellUsage();
return 1;
}
if (attributes.containsKey(attr)) {
out.println("\nEach attribute must correspond to only one value:\n" +
"atttribute \"" + attr + "\" was repeated\n" );
printKeyShellUsage();
return 1;
}
attributes.put(attr, val);
} else if ("-provider".equals(args[i]) && moreTokens) {
userSuppliedProvider = true;
getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]);
} else if ("-metadata".equals(args[i])) {
getConf().setBoolean(LIST_METADATA, true);
} else if ("-f".equals(args[i]) || ("-force".equals(args[i]))) {
interactive = false;
} else if ("-help".equals(args[i])) {
printKeyShellUsage();
return 1;
} else {
printKeyShellUsage();
ToolRunner.printGenericCommandUsage(System.err);
return 1;
}
}
if (command == null) {
printKeyShellUsage();
return 1;
}
if (!attributes.isEmpty()) {
options.setAttributes(attributes);
}
return 0;
}
private void printKeyShellUsage() {
out.println(USAGE_PREFIX + COMMANDS);
if (command != null) {
out.println(command.getUsage());
} else {
out.println("=========================================================" +
"======");
out.println(CreateCommand.USAGE + ":\n\n" + CreateCommand.DESC);
out.println("=========================================================" +
"======");
out.println(RollCommand.USAGE + ":\n\n" + RollCommand.DESC);
out.println("=========================================================" +
"======");
out.println(DeleteCommand.USAGE + ":\n\n" + DeleteCommand.DESC);
out.println("=========================================================" +
"======");
out.println(ListCommand.USAGE + ":\n\n" + ListCommand.DESC);
}
}
private abstract class Command {
protected KeyProvider provider = null;
public boolean validate() {
return true;
}
protected KeyProvider getKeyProvider() {
KeyProvider provider = null;
List<KeyProvider> providers;
try {
providers = KeyProviderFactory.getProviders(getConf());
if (userSuppliedProvider) {
provider = providers.get(0);
} else {
for (KeyProvider p : providers) {
if (!p.isTransient()) {
provider = p;
break;
}
}
}
} catch (IOException e) {
e.printStackTrace(err);
}
return provider;
}
protected void printProviderWritten() {
out.println(provider + " has been updated.");
}
protected void warnIfTransientProvider() {
if (provider.isTransient()) {
out.println("WARNING: you are modifying a transient provider.");
}
}
public abstract void execute() throws Exception;
public abstract String getUsage();
}
private class ListCommand extends Command {
public static final String USAGE =
"list [-provider <provider>] [-metadata] [-help]";
public static final String DESC =
"The list subcommand displays the keynames contained within\n" +
"a particular provider as configured in core-site.xml or\n" +
"specified with the -provider argument. -metadata displays\n" +
"the metadata.";
private boolean metadata = false;
public boolean validate() {
boolean rc = true;
provider = getKeyProvider();
if (provider == null) {
out.println("There are no non-transient KeyProviders configured.\n"
+ "Use the -provider option to specify a provider. If you\n"
+ "want to list a transient provider then you must use the\n"
+ "-provider argument.");
rc = false;
}
metadata = getConf().getBoolean(LIST_METADATA, false);
return rc;
}
public void execute() throws IOException {
try {
final List<String> keys = provider.getKeys();
out.println("Listing keys for KeyProvider: " + provider);
if (metadata) {
final Metadata[] meta =
provider.getKeysMetadata(keys.toArray(new String[keys.size()]));
for (int i = 0; i < meta.length; ++i) {
out.println(keys.get(i) + " : " + meta[i]);
}
} else {
for (String keyName : keys) {
out.println(keyName);
}
}
} catch (IOException e) {
out.println("Cannot list keys for KeyProvider: " + provider
+ ": " + e.toString());
throw e;
}
}
@Override
public String getUsage() {
return USAGE + ":\n\n" + DESC;
}
}
private class RollCommand extends Command {
public static final String USAGE = "roll <keyname> [-provider <provider>] [-help]";
public static final String DESC =
"The roll subcommand creates a new version for the specified key\n" +
"within the provider indicated using the -provider argument\n";
String keyName = null;
public RollCommand(String keyName) {
this.keyName = keyName;
}
public boolean validate() {
boolean rc = true;
provider = getKeyProvider();
if (provider == null) {
out.println("There are no valid KeyProviders configured. The key\n" +
"has not been rolled. Use the -provider option to specify\n" +
"a provider.");
rc = false;
}
if (keyName == null) {
out.println("Please provide a <keyname>.\n" +
"See the usage description by using -help.");
rc = false;
}
return rc;
}
public void execute() throws NoSuchAlgorithmException, IOException {
try {
warnIfTransientProvider();
out.println("Rolling key version from KeyProvider: "
+ provider + "\n for key name: " + keyName);
try {
provider.rollNewVersion(keyName);
provider.flush();
out.println(keyName + " has been successfully rolled.");
printProviderWritten();
} catch (NoSuchAlgorithmException e) {
out.println("Cannot roll key: " + keyName + " within KeyProvider: "
+ provider + ". " + e.toString());
throw e;
}
} catch (IOException e1) {
out.println("Cannot roll key: " + keyName + " within KeyProvider: "
+ provider + ". " + e1.toString());
throw e1;
}
}
@Override
public String getUsage() {
return USAGE + ":\n\n" + DESC;
}
}
private class DeleteCommand extends Command {
public static final String USAGE =
"delete <keyname> [-provider <provider>] [-f] [-help]";
public static final String DESC =
"The delete subcommand deletes all versions of the key\n" +
"specified by the <keyname> argument from within the\n" +
"provider specified -provider. The command asks for\n" +
"user confirmation unless -f is specified.";
String keyName = null;
boolean cont = true;
public DeleteCommand(String keyName) {
this.keyName = keyName;
}
@Override
public boolean validate() {
provider = getKeyProvider();
if (provider == null) {
out.println("There are no valid KeyProviders configured. Nothing\n"
+ "was deleted. Use the -provider option to specify a provider.");
return false;
}
if (keyName == null) {
out.println("There is no keyName specified. Please specify a " +
"<keyname>. See the usage description with -help.");
return false;
}
if (interactive) {
try {
cont = ToolRunner
.confirmPrompt("You are about to DELETE all versions of "
+ " key " + keyName + " from KeyProvider "
+ provider + ". Continue? ");
if (!cont) {
out.println(keyName + " has not been deleted.");
}
return cont;
} catch (IOException e) {
out.println(keyName + " will not be deleted.");
e.printStackTrace(err);
}
}
return true;
}
public void execute() throws IOException {
warnIfTransientProvider();
out.println("Deleting key: " + keyName + " from KeyProvider: "
+ provider);
if (cont) {
try {
provider.deleteKey(keyName);
provider.flush();
out.println(keyName + " has been successfully deleted.");
printProviderWritten();
} catch (IOException e) {
out.println(keyName + " has not been deleted. " + e.toString());
throw e;
}
}
}
@Override
public String getUsage() {
return USAGE + ":\n\n" + DESC;
}
}
private class CreateCommand extends Command {
public static final String USAGE =
"create <keyname> [-cipher <cipher>] [-size <size>]\n" +
" [-description <description>]\n" +
" [-attr <attribute=value>]\n" +
" [-provider <provider>] [-help]";
public static final String DESC =
"The create subcommand creates a new key for the name specified\n" +
"by the <keyname> argument within the provider specified by the\n" +
"-provider argument. You may specify a cipher with the -cipher\n" +
"argument. The default cipher is currently \"AES/CTR/NoPadding\".\n" +
"The default keysize is 128. You may specify the requested key\n" +
"length using the -size argument. Arbitrary attribute=value\n" +
"style attributes may be specified using the -attr argument.\n" +
"-attr may be specified multiple times, once per attribute.\n";
final String keyName;
final Options options;
public CreateCommand(String keyName, Options options) {
this.keyName = keyName;
this.options = options;
}
public boolean validate() {
boolean rc = true;
provider = getKeyProvider();
if (provider == null) {
out.println("There are no valid KeyProviders configured. No key\n" +
" was created. You can use the -provider option to specify\n" +
" a provider to use.");
rc = false;
}
if (keyName == null) {
out.println("Please provide a <keyname>. See the usage description" +
" with -help.");
rc = false;
}
return rc;
}
public void execute() throws IOException, NoSuchAlgorithmException {
warnIfTransientProvider();
try {
provider.createKey(keyName, options);
provider.flush();
out.println(keyName + " has been successfully created with options "
+ options.toString() + ".");
printProviderWritten();
} catch (InvalidParameterException e) {
out.println(keyName + " has not been created. " + e.toString());
throw e;
} catch (IOException e) {
out.println(keyName + " has not been created. " + e.toString());
throw e;
} catch (NoSuchAlgorithmException e) {
out.println(keyName + " has not been created. " + e.toString());
throw e;
}
}
@Override
public String getUsage() {
return USAGE + ":\n\n" + DESC;
}
}
/**
* main() entry point for the KeyShell. While strictly speaking the
* return is void, it will System.exit() with a return code: 0 is for
* success and 1 for failure.
*
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new KeyShell(), args);
System.exit(res);
}
}
| 17,236 | 32.211946 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/CachingKeyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
/**
 * A <code>KeyProviderExtension</code> implementation providing a short-lived
 * cache for <code>KeyVersions</code> and <code>Metadata</code> to avoid
 * bursts of requests hitting the underlying <code>KeyProvider</code>.
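 * <p/>
 * A construction sketch (timeout values are illustrative; {@code provider}
 * is an assumed pre-existing KeyProvider):
 * <pre>
 *   KeyProvider cached = new CachingKeyProvider(
 *       provider, 10 * 60 * 1000, 30 * 1000);
 * </pre>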
*/
public class CachingKeyProvider extends
KeyProviderExtension<CachingKeyProvider.CacheExtension> {
static class CacheExtension implements KeyProviderExtension.Extension {
private final KeyProvider provider;
private LoadingCache<String, KeyVersion> keyVersionCache;
private LoadingCache<String, KeyVersion> currentKeyCache;
private LoadingCache<String, Metadata> keyMetadataCache;
CacheExtension(KeyProvider prov, long keyTimeoutMillis,
long currKeyTimeoutMillis) {
this.provider = prov;
keyVersionCache =
CacheBuilder.newBuilder().expireAfterAccess(keyTimeoutMillis,
TimeUnit.MILLISECONDS)
.build(new CacheLoader<String, KeyVersion>() {
@Override
public KeyVersion load(String key) throws Exception {
KeyVersion kv = provider.getKeyVersion(key);
if (kv == null) {
throw new KeyNotFoundException();
}
return kv;
}
});
keyMetadataCache =
CacheBuilder.newBuilder().expireAfterAccess(keyTimeoutMillis,
TimeUnit.MILLISECONDS)
.build(new CacheLoader<String, Metadata>() {
@Override
public Metadata load(String key) throws Exception {
Metadata meta = provider.getMetadata(key);
if (meta == null) {
throw new KeyNotFoundException();
}
return meta;
}
});
currentKeyCache =
CacheBuilder.newBuilder().expireAfterWrite(currKeyTimeoutMillis,
TimeUnit.MILLISECONDS)
.build(new CacheLoader<String, KeyVersion>() {
@Override
public KeyVersion load(String key) throws Exception {
KeyVersion kv = provider.getCurrentKey(key);
if (kv == null) {
throw new KeyNotFoundException();
}
return kv;
}
});
}
}
@SuppressWarnings("serial")
private static class KeyNotFoundException extends Exception { }
public CachingKeyProvider(KeyProvider keyProvider, long keyTimeoutMillis,
long currKeyTimeoutMillis) {
super(keyProvider, new CacheExtension(keyProvider, keyTimeoutMillis,
currKeyTimeoutMillis));
}
@Override
public KeyVersion getCurrentKey(String name) throws IOException {
try {
return getExtension().currentKeyCache.get(name);
} catch (ExecutionException ex) {
Throwable cause = ex.getCause();
if (cause instanceof KeyNotFoundException) {
return null;
} else if (cause instanceof IOException) {
throw (IOException) cause;
} else {
throw new IOException(cause);
}
}
}
@Override
public KeyVersion getKeyVersion(String versionName)
throws IOException {
try {
return getExtension().keyVersionCache.get(versionName);
} catch (ExecutionException ex) {
Throwable cause = ex.getCause();
if (cause instanceof KeyNotFoundException) {
return null;
} else if (cause instanceof IOException) {
throw (IOException) cause;
} else {
throw new IOException(cause);
}
}
}
@Override
public void deleteKey(String name) throws IOException {
getKeyProvider().deleteKey(name);
getExtension().currentKeyCache.invalidate(name);
getExtension().keyMetadataCache.invalidate(name);
// invalidating all key versions as we don't know
// which ones belonged to the deleted key
getExtension().keyVersionCache.invalidateAll();
}
@Override
public KeyVersion rollNewVersion(String name, byte[] material)
throws IOException {
KeyVersion key = getKeyProvider().rollNewVersion(name, material);
getExtension().currentKeyCache.invalidate(name);
getExtension().keyMetadataCache.invalidate(name);
return key;
}
@Override
public KeyVersion rollNewVersion(String name)
throws NoSuchAlgorithmException, IOException {
KeyVersion key = getKeyProvider().rollNewVersion(name);
getExtension().currentKeyCache.invalidate(name);
getExtension().keyMetadataCache.invalidate(name);
return key;
}
@Override
public Metadata getMetadata(String name) throws IOException {
try {
return getExtension().keyMetadataCache.get(name);
} catch (ExecutionException ex) {
Throwable cause = ex.getCause();
if (cause instanceof KeyNotFoundException) {
return null;
} else if (cause instanceof IOException) {
throw (IOException) cause;
} else {
throw new IOException(cause);
}
}
}
}
| 6,113 | 33.937143 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms;
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
/**
* A simple LoadBalancing KMSClientProvider that round-robins requests
* across a provided array of KMSClientProviders. It also retries failed
* requests on the next available provider in the load balancer group. It
* only retries failed requests that result in an IOException, sending back
* all other Exceptions to the caller without retry.
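 * <p/>
 * A construction sketch (the provider array is illustrative;
 * {@code createTargets} is a hypothetical helper, and instances are
 * normally created through the KMS provider factory):
 * <pre>
 *   KMSClientProvider[] targets = createTargets(conf);
 *   KeyProvider lb = new LoadBalancingKMSClientProvider(targets, conf);
 * </pre>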
*/
public class LoadBalancingKMSClientProvider extends KeyProvider implements
CryptoExtension,
KeyProviderDelegationTokenExtension.DelegationTokenExtension {
public static Logger LOG =
LoggerFactory.getLogger(LoadBalancingKMSClientProvider.class);
static interface ProviderCallable<T> {
public T call(KMSClientProvider provider) throws IOException, Exception;
}
@SuppressWarnings("serial")
static class WrapperException extends RuntimeException {
public WrapperException(Throwable cause) {
super(cause);
}
}
private final KMSClientProvider[] providers;
private final AtomicInteger currentIdx;
public LoadBalancingKMSClientProvider(KMSClientProvider[] providers,
Configuration conf) {
this(shuffle(providers), Time.monotonicNow(), conf);
}
@VisibleForTesting
LoadBalancingKMSClientProvider(KMSClientProvider[] providers, long seed,
Configuration conf) {
super(conf);
this.providers = providers;
this.currentIdx = new AtomicInteger((int)(seed % providers.length));
}
@VisibleForTesting
KMSClientProvider[] getProviders() {
return providers;
}
private <T> T doOp(ProviderCallable<T> op, int currPos)
throws IOException {
IOException ex = null;
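    // Try each provider at most once, starting at currPos; remember the
    // last IOException so it can be rethrown if every provider fails.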
for (int i = 0; i < providers.length; i++) {
KMSClientProvider provider = providers[(currPos + i) % providers.length];
try {
return op.call(provider);
} catch (IOException ioe) {
LOG.warn("KMS provider at [{}] threw an IOException [{}]!!",
provider.getKMSUrl(), ioe.getMessage());
ex = ioe;
} catch (Exception e) {
if (e instanceof RuntimeException) {
throw (RuntimeException)e;
} else {
throw new WrapperException(e);
}
}
}
if (ex != null) {
LOG.warn("Aborting since the Request has failed with all KMS"
+ " providers in the group. !!");
throw ex;
}
throw new IOException("No providers configured !!");
}
private int nextIdx() {
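    // Round-robin using a CAS loop: atomically advance currentIdx and hand
    // the pre-increment value back as this caller's starting position.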
while (true) {
int current = currentIdx.get();
int next = (current + 1) % providers.length;
if (currentIdx.compareAndSet(current, next)) {
return current;
}
}
}
@Override
public Token<?>[]
addDelegationTokens(final String renewer, final Credentials credentials)
throws IOException {
return doOp(new ProviderCallable<Token<?>[]>() {
@Override
public Token<?>[] call(KMSClientProvider provider) throws IOException {
return provider.addDelegationTokens(renewer, credentials);
}
}, nextIdx());
}
// This request is sent to all providers in the load-balancing group
@Override
public void warmUpEncryptedKeys(String... keyNames) throws IOException {
for (KMSClientProvider provider : providers) {
try {
provider.warmUpEncryptedKeys(keyNames);
} catch (IOException ioe) {
LOG.error(
"Error warming up keys for provider with url"
+ "[" + provider.getKMSUrl() + "]");
}
}
}
// This request is sent to all providers in the load-balancing group
@Override
public void drain(String keyName) {
for (KMSClientProvider provider : providers) {
provider.drain(keyName);
}
}
@Override
public EncryptedKeyVersion
generateEncryptedKey(final String encryptionKeyName)
throws IOException, GeneralSecurityException {
try {
return doOp(new ProviderCallable<EncryptedKeyVersion>() {
@Override
public EncryptedKeyVersion call(KMSClientProvider provider)
throws IOException, GeneralSecurityException {
return provider.generateEncryptedKey(encryptionKeyName);
}
}, nextIdx());
} catch (WrapperException we) {
throw (GeneralSecurityException) we.getCause();
}
}
@Override
public KeyVersion
decryptEncryptedKey(final EncryptedKeyVersion encryptedKeyVersion)
throws IOException, GeneralSecurityException {
try {
return doOp(new ProviderCallable<KeyVersion>() {
@Override
public KeyVersion call(KMSClientProvider provider)
throws IOException, GeneralSecurityException {
return provider.decryptEncryptedKey(encryptedKeyVersion);
}
}, nextIdx());
} catch (WrapperException we) {
throw (GeneralSecurityException)we.getCause();
}
}
@Override
public KeyVersion getKeyVersion(final String versionName) throws IOException {
return doOp(new ProviderCallable<KeyVersion>() {
@Override
public KeyVersion call(KMSClientProvider provider) throws IOException {
return provider.getKeyVersion(versionName);
}
}, nextIdx());
}
@Override
public List<String> getKeys() throws IOException {
return doOp(new ProviderCallable<List<String>>() {
@Override
public List<String> call(KMSClientProvider provider) throws IOException {
return provider.getKeys();
}
}, nextIdx());
}
@Override
public Metadata[] getKeysMetadata(final String... names) throws IOException {
return doOp(new ProviderCallable<Metadata[]>() {
@Override
public Metadata[] call(KMSClientProvider provider) throws IOException {
return provider.getKeysMetadata(names);
}
}, nextIdx());
}
@Override
public List<KeyVersion> getKeyVersions(final String name) throws IOException {
return doOp(new ProviderCallable<List<KeyVersion>>() {
@Override
public List<KeyVersion> call(KMSClientProvider provider)
throws IOException {
return provider.getKeyVersions(name);
}
}, nextIdx());
}
@Override
public KeyVersion getCurrentKey(final String name) throws IOException {
return doOp(new ProviderCallable<KeyVersion>() {
@Override
public KeyVersion call(KMSClientProvider provider) throws IOException {
return provider.getCurrentKey(name);
}
}, nextIdx());
}
@Override
public Metadata getMetadata(final String name) throws IOException {
return doOp(new ProviderCallable<Metadata>() {
@Override
public Metadata call(KMSClientProvider provider) throws IOException {
return provider.getMetadata(name);
}
}, nextIdx());
}
@Override
public KeyVersion createKey(final String name, final byte[] material,
final Options options) throws IOException {
return doOp(new ProviderCallable<KeyVersion>() {
@Override
public KeyVersion call(KMSClientProvider provider) throws IOException {
return provider.createKey(name, material, options);
}
}, nextIdx());
}
@Override
public KeyVersion createKey(final String name, final Options options)
throws NoSuchAlgorithmException, IOException {
try {
return doOp(new ProviderCallable<KeyVersion>() {
@Override
public KeyVersion call(KMSClientProvider provider) throws IOException,
NoSuchAlgorithmException {
return provider.createKey(name, options);
}
}, nextIdx());
} catch (WrapperException e) {
throw (NoSuchAlgorithmException)e.getCause();
}
}
@Override
public void deleteKey(final String name) throws IOException {
doOp(new ProviderCallable<Void>() {
@Override
public Void call(KMSClientProvider provider) throws IOException {
provider.deleteKey(name);
return null;
}
}, nextIdx());
}
@Override
public KeyVersion rollNewVersion(final String name, final byte[] material)
throws IOException {
return doOp(new ProviderCallable<KeyVersion>() {
@Override
public KeyVersion call(KMSClientProvider provider) throws IOException {
return provider.rollNewVersion(name, material);
}
}, nextIdx());
}
@Override
public KeyVersion rollNewVersion(final String name)
throws NoSuchAlgorithmException, IOException {
try {
return doOp(new ProviderCallable<KeyVersion>() {
@Override
public KeyVersion call(KMSClientProvider provider) throws IOException,
NoSuchAlgorithmException {
return provider.rollNewVersion(name);
}
}, nextIdx());
} catch (WrapperException e) {
throw (NoSuchAlgorithmException)e.getCause();
}
}
// Close all providers in the LB group
@Override
public void close() throws IOException {
for (KMSClientProvider provider : providers) {
try {
provider.close();
} catch (IOException ioe) {
LOG.error("Error closing provider with url"
+ "[" + provider.getKMSUrl() + "]");
}
}
}
@Override
public void flush() throws IOException {
for (KMSClientProvider provider : providers) {
try {
provider.flush();
} catch (IOException ioe) {
LOG.error("Error flushing provider with url"
+ "[" + provider.getKMSUrl() + "]");
}
}
}
private static KMSClientProvider[] shuffle(KMSClientProvider[] providers) {
List<KMSClientProvider> list = Arrays.asList(providers);
Collections.shuffle(list);
return list.toArray(providers);
}
}
| 11,216 | 31.232759 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms;
import java.io.IOException;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * A utility class that maintains a Queue of entries for a given key. It tries
 * to ensure that there are always at least <code>numValues</code> entries
 * available for the client to consume for a particular key.
 * It also uses an underlying Cache to evict queues for keys that have not been
 * accessed for a configurable period of time.
 * Implementing classes are required to implement the
 * <code>QueueRefiller</code> interface that exposes a method to refill the
 * queue when empty.
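 * <p/>
 * A construction sketch (parameter values are illustrative; {@code refiller}
 * is an assumed QueueRefiller implementation):
 * <pre>
 *   ValueQueue&lt;byte[]&gt; vq = new ValueQueue&lt;byte[]&gt;(
 *       10, 0.3f, 60000, 2, SyncGenerationPolicy.ALL, refiller);
 *   byte[] next = vq.getNext("mykey");
 * </pre>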
*/
@InterfaceAudience.Private
public class ValueQueue <E> {
/**
   * QueueRefiller is the interface a client must implement to use this class.
*/
public interface QueueRefiller <E> {
/**
* Method that has to be implemented by implementing classes to fill the
* Queue.
* @param keyName Key name
* @param keyQueue Queue that needs to be filled
* @param numValues number of Values to be added to the queue.
* @throws IOException
*/
public void fillQueueForKey(String keyName,
Queue<E> keyQueue, int numValues) throws IOException;
}
private static final String REFILL_THREAD =
ValueQueue.class.getName() + "_thread";
private final LoadingCache<String, LinkedBlockingQueue<E>> keyQueues;
private final ThreadPoolExecutor executor;
private final UniqueKeyBlockingQueue queue = new UniqueKeyBlockingQueue();
private final QueueRefiller<E> refiller;
private final SyncGenerationPolicy policy;
private final int numValues;
private final float lowWatermark;
private volatile boolean executorThreadsStarted = false;
/**
* A <code>Runnable</code> which takes a string name.
*/
private abstract static class NamedRunnable implements Runnable {
final String name;
private NamedRunnable(String keyName) {
this.name = keyName;
}
}
/**
   * This backing blocking queue is used in conjunction with the
   * <code>ThreadPoolExecutor</code> used by the <code>ValueQueue</code>. This
   * Queue accepts a task only if the task is not currently in the process
   * of being run by a thread, which is implied by the presence of the key
   * in the <code>keysInProgress</code> set.
   *
   * NOTE: Only methods that are explicitly called by the
   * <code>ThreadPoolExecutor</code> need to be overridden.
*/
private static class UniqueKeyBlockingQueue extends
LinkedBlockingQueue<Runnable> {
private static final long serialVersionUID = -2152747693695890371L;
private HashSet<String> keysInProgress = new HashSet<String>();
@Override
public synchronized void put(Runnable e) throws InterruptedException {
if (keysInProgress.add(((NamedRunnable)e).name)) {
super.put(e);
}
}
@Override
public Runnable take() throws InterruptedException {
Runnable k = super.take();
if (k != null) {
keysInProgress.remove(((NamedRunnable)k).name);
}
return k;
}
@Override
public Runnable poll(long timeout, TimeUnit unit)
throws InterruptedException {
Runnable k = super.poll(timeout, unit);
if (k != null) {
keysInProgress.remove(((NamedRunnable)k).name);
}
return k;
}
}
/**
   * Policy that decides how many values to generate synchronously when a
   * client calls "getAtMost" asking for "n" values and the Queue is empty.
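   * <p/>
   * For example (illustrative): with numValues = 10 and lowWatermark = 0.3,
   * a call to getAtMost(key, 8) against an empty queue synchronously
   * generates 1 value under ATLEAST_ONE, 3 under LOW_WATERMARK and 8
   * under ALL.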
*/
public static enum SyncGenerationPolicy {
    ATLEAST_ONE, // Return at least 1 value
LOW_WATERMARK, // Return min(n, lowWatermark * numValues) values
ALL // Return n values
}
/**
* Constructor takes the following tunable configuration parameters
* @param numValues The number of values cached in the Queue for a
* particular key.
   * @param lowWatermark The ratio of (number of current entries/numValues)
   *        below which the <code>fillQueueForKey()</code> function will be
   *        invoked to fill the Queue.
* @param expiry Expiry time after which the Key and associated Queue are
* evicted from the cache.
* @param numFillerThreads Number of threads to use for the filler thread
* @param policy The SyncGenerationPolicy to use when client
* calls "getAtMost"
* @param refiller implementation of the QueueRefiller
*/
public ValueQueue(final int numValues, final float lowWatermark,
long expiry, int numFillerThreads, SyncGenerationPolicy policy,
final QueueRefiller<E> refiller) {
Preconditions.checkArgument(numValues > 0, "\"numValues\" must be > 0");
    Preconditions.checkArgument((lowWatermark > 0) && (lowWatermark <= 1),
"\"lowWatermark\" must be > 0 and <= 1");
Preconditions.checkArgument(expiry > 0, "\"expiry\" must be > 0");
Preconditions.checkArgument(numFillerThreads > 0,
"\"numFillerThreads\" must be > 0");
Preconditions.checkNotNull(policy, "\"policy\" must not be null");
this.refiller = refiller;
this.policy = policy;
this.numValues = numValues;
this.lowWatermark = lowWatermark;
keyQueues = CacheBuilder.newBuilder()
.expireAfterAccess(expiry, TimeUnit.MILLISECONDS)
.build(new CacheLoader<String, LinkedBlockingQueue<E>>() {
@Override
public LinkedBlockingQueue<E> load(String keyName)
throws Exception {
LinkedBlockingQueue<E> keyQueue =
new LinkedBlockingQueue<E>();
refiller.fillQueueForKey(keyName, keyQueue,
(int)(lowWatermark * numValues));
return keyQueue;
}
});
executor =
new ThreadPoolExecutor(numFillerThreads, numFillerThreads, 0L,
TimeUnit.MILLISECONDS, queue, new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(REFILL_THREAD).build());
}
public ValueQueue(final int numValues, final float lowWaterMark, long expiry,
int numFillerThreads, QueueRefiller<E> fetcher) {
this(numValues, lowWaterMark, expiry, numFillerThreads,
SyncGenerationPolicy.ALL, fetcher);
}
/**
* Initializes the Value Queues for the provided keys by calling the
* fill Method with "numInitValues" values
* @param keyNames Array of key Names
* @throws ExecutionException
*/
public void initializeQueuesForKeys(String... keyNames)
throws ExecutionException {
for (String keyName : keyNames) {
keyQueues.get(keyName);
}
}
/**
* This removes the value currently at the head of the Queue for the
* provided key. Will immediately fire the Queue filler function if key
* does not exist.
   * If the Queue exists but all values are drained, it will ask the generator
* function to add 1 value to Queue and then drain it.
* @param keyName String key name
* @return E the next value in the Queue
* @throws IOException
* @throws ExecutionException
*/
public E getNext(String keyName)
throws IOException, ExecutionException {
return getAtMost(keyName, 1).get(0);
}
/**
* Drains the Queue for the provided key.
*
* @param keyName the key to drain the Queue for
*/
  public void drain(String keyName) {
try {
keyQueues.get(keyName).clear();
} catch (ExecutionException ex) {
//NOP
}
}
/**
* Get size of the Queue for keyName
* @param keyName the key name
* @return int queue size
* @throws ExecutionException
*/
public int getSize(String keyName) throws ExecutionException {
return keyQueues.get(keyName).size();
}
/**
* This removes the "num" values currently at the head of the Queue for the
* provided key. Will immediately fire the Queue filler function if key
   * does not exist.
* How many values are actually returned is governed by the
* <code>SyncGenerationPolicy</code> specified by the user.
* @param keyName String key name
* @param num Minimum number of values to return.
* @return List<E> values returned
* @throws IOException
* @throws ExecutionException
*/
public List<E> getAtMost(String keyName, int num) throws IOException,
ExecutionException {
LinkedBlockingQueue<E> keyQueue = keyQueues.get(keyName);
    // Using poll (not take) so an empty queue is detected without blocking.
LinkedList<E> ekvs = new LinkedList<E>();
try {
for (int i = 0; i < num; i++) {
E val = keyQueue.poll();
// If queue is empty now, Based on the provided SyncGenerationPolicy,
// figure out how many new values need to be generated synchronously
if (val == null) {
// Synchronous call to get remaining values
int numToFill = 0;
switch (policy) {
case ATLEAST_ONE:
numToFill = (ekvs.size() < 1) ? 1 : 0;
break;
case LOW_WATERMARK:
numToFill =
Math.min(num, (int) (lowWatermark * numValues)) - ekvs.size();
break;
case ALL:
numToFill = num - ekvs.size();
break;
}
// Synchronous fill if not enough values found
if (numToFill > 0) {
refiller.fillQueueForKey(keyName, ekvs, numToFill);
}
          // Async task to refill the queue beyond the low watermark
if (i <= (int) (lowWatermark * numValues)) {
submitRefillTask(keyName, keyQueue);
}
return ekvs;
}
ekvs.add(val);
}
} catch (Exception e) {
throw new IOException("Exeption while contacting value generator ", e);
}
return ekvs;
}
private void submitRefillTask(final String keyName,
final Queue<E> keyQueue) throws InterruptedException {
if (!executorThreadsStarted) {
synchronized (this) {
if (!executorThreadsStarted) {
// To ensure all requests are first queued, make coreThreads =
// maxThreads
// and pre-start all the Core Threads.
executor.prestartAllCoreThreads();
executorThreadsStarted = true;
}
}
}
// The submit/execute method of the ThreadPoolExecutor is bypassed and
// the Runnable is directly put in the backing BlockingQueue so that we
// can control exactly how the runnable is inserted into the queue.
queue.put(
new NamedRunnable(keyName) {
@Override
public void run() {
int cacheSize = numValues;
int threshold = (int) (lowWatermark * (float) cacheSize);
// Need to ensure that only one refill task per key is executed
try {
if (keyQueue.size() < threshold) {
refiller.fillQueueForKey(name, keyQueue,
cacheSize - keyQueue.size());
}
} catch (final Exception e) {
throw new RuntimeException(e);
}
}
}
);
}
/**
* Cleanly shutdown
*/
public void shutdown() {
executor.shutdownNow();
}
}
| 12,458 | 34.495726 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.apache.http.client.utils.URIBuilder;
import org.codehaus.jackson.map.ObjectMapper;
import javax.net.ssl.HttpsURLConnection;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.reflect.UndeclaredThrowableException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLEncoder;
import java.security.GeneralSecurityException;
import java.security.NoSuchAlgorithmException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
/**
* KMS client <code>KeyProvider</code> implementation.
*/
@InterfaceAudience.Private
public class KMSClientProvider extends KeyProvider implements CryptoExtension,
KeyProviderDelegationTokenExtension.DelegationTokenExtension {
private static final String INVALID_SIGNATURE = "Invalid signature";
private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous requests are disallowed";
public static final String TOKEN_KIND = "kms-dt";
public static final String SCHEME_NAME = "kms";
private static final String UTF8 = "UTF-8";
private static final String CONTENT_TYPE = "Content-Type";
private static final String APPLICATION_JSON_MIME = "application/json";
private static final String HTTP_GET = "GET";
private static final String HTTP_POST = "POST";
private static final String HTTP_PUT = "PUT";
private static final String HTTP_DELETE = "DELETE";
private static final String CONFIG_PREFIX = "hadoop.security.kms.client.";
/* It's possible to specify a timeout, in seconds, in the config file */
public static final String TIMEOUT_ATTR = CONFIG_PREFIX + "timeout";
public static final int DEFAULT_TIMEOUT = 60;
/* Number of times to retry authentication in the event of auth failure
* (normally happens due to stale authToken)
*/
public static final String AUTH_RETRY = CONFIG_PREFIX
+ "authentication.retry-count";
public static final int DEFAULT_AUTH_RETRY = 1;
private final ValueQueue<EncryptedKeyVersion> encKeyVersionQueue;
private class EncryptedQueueRefiller implements
ValueQueue.QueueRefiller<EncryptedKeyVersion> {
@Override
public void fillQueueForKey(String keyName,
Queue<EncryptedKeyVersion> keyQueue, int numEKVs) throws IOException {
checkNotNull(keyName, "keyName");
Map<String, String> params = new HashMap<String, String>();
params.put(KMSRESTConstants.EEK_OP, KMSRESTConstants.EEK_GENERATE);
params.put(KMSRESTConstants.EEK_NUM_KEYS, "" + numEKVs);
URL url = createURL(KMSRESTConstants.KEY_RESOURCE, keyName,
KMSRESTConstants.EEK_SUB_RESOURCE, params);
HttpURLConnection conn = createConnection(url, HTTP_GET);
conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
List response = call(conn, null,
HttpURLConnection.HTTP_OK, List.class);
List<EncryptedKeyVersion> ekvs =
parseJSONEncKeyVersion(keyName, response);
keyQueue.addAll(ekvs);
}
}
public static class KMSEncryptedKeyVersion extends EncryptedKeyVersion {
public KMSEncryptedKeyVersion(String keyName, String keyVersionName,
byte[] iv, String encryptedVersionName, byte[] keyMaterial) {
super(keyName, keyVersionName, iv, new KMSKeyVersion(null,
encryptedVersionName, keyMaterial));
}
}
@SuppressWarnings("rawtypes")
private static List<EncryptedKeyVersion>
parseJSONEncKeyVersion(String keyName, List valueList) {
List<EncryptedKeyVersion> ekvs = new LinkedList<EncryptedKeyVersion>();
if (!valueList.isEmpty()) {
for (Object values : valueList) {
Map valueMap = (Map) values;
String versionName = checkNotNull(
(String) valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
KMSRESTConstants.VERSION_NAME_FIELD);
byte[] iv = Base64.decodeBase64(checkNotNull(
(String) valueMap.get(KMSRESTConstants.IV_FIELD),
KMSRESTConstants.IV_FIELD));
Map encValueMap = checkNotNull((Map)
valueMap.get(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD),
KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD);
String encVersionName = checkNotNull((String)
encValueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
KMSRESTConstants.VERSION_NAME_FIELD);
byte[] encKeyMaterial = Base64.decodeBase64(checkNotNull((String)
encValueMap.get(KMSRESTConstants.MATERIAL_FIELD),
KMSRESTConstants.MATERIAL_FIELD));
ekvs.add(new KMSEncryptedKeyVersion(keyName, versionName, iv,
encVersionName, encKeyMaterial));
}
}
return ekvs;
}
private static KeyVersion parseJSONKeyVersion(Map valueMap) {
KeyVersion keyVersion = null;
if (!valueMap.isEmpty()) {
byte[] material = (valueMap.containsKey(KMSRESTConstants.MATERIAL_FIELD))
? Base64.decodeBase64((String) valueMap.get(KMSRESTConstants.MATERIAL_FIELD))
: null;
String versionName = (String)valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD);
String keyName = (String)valueMap.get(KMSRESTConstants.NAME_FIELD);
keyVersion = new KMSKeyVersion(keyName, versionName, material);
}
return keyVersion;
}
@SuppressWarnings("unchecked")
private static Metadata parseJSONMetadata(Map valueMap) {
Metadata metadata = null;
if (!valueMap.isEmpty()) {
metadata = new KMSMetadata(
(String) valueMap.get(KMSRESTConstants.CIPHER_FIELD),
(Integer) valueMap.get(KMSRESTConstants.LENGTH_FIELD),
(String) valueMap.get(KMSRESTConstants.DESCRIPTION_FIELD),
(Map<String, String>) valueMap.get(KMSRESTConstants.ATTRIBUTES_FIELD),
new Date((Long) valueMap.get(KMSRESTConstants.CREATED_FIELD)),
(Integer) valueMap.get(KMSRESTConstants.VERSIONS_FIELD));
}
return metadata;
}
private static void writeJson(Map map, OutputStream os) throws IOException {
Writer writer = new OutputStreamWriter(os, Charsets.UTF_8);
ObjectMapper jsonMapper = new ObjectMapper();
jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, map);
}
/**
* The factory to create KMSClientProvider, which is used by the
* ServiceLoader.
*/
public static class Factory extends KeyProviderFactory {
/**
* This provider expects URIs in the following form :
* kms://<PROTO>@<AUTHORITY>/<PATH>
*
* where :
* - PROTO = http or https
* - AUTHORITY = <HOSTS>[:<PORT>]
* - HOSTS = <HOSTNAME>[;<HOSTS>]
* - HOSTNAME = string
* - PORT = integer
*
     * If multiple hosts are provided, the Factory will create a
* {@link LoadBalancingKMSClientProvider} that round-robins requests
* across the provided list of hosts.
*/
@Override
public KeyProvider createProvider(URI providerUri, Configuration conf)
throws IOException {
if (SCHEME_NAME.equals(providerUri.getScheme())) {
URL origUrl = new URL(extractKMSPath(providerUri).toString());
String authority = origUrl.getAuthority();
// check for ';' which delimits the backup hosts
if (Strings.isNullOrEmpty(authority)) {
throw new IOException(
"No valid authority in kms uri [" + origUrl + "]");
}
// Check if port is present in authority
// In the current scheme, all hosts have to run on the same port
int port = -1;
String hostsPart = authority;
if (authority.contains(":")) {
String[] t = authority.split(":");
try {
port = Integer.parseInt(t[1]);
} catch (Exception e) {
throw new IOException(
"Could not parse port in kms uri [" + origUrl + "]");
}
hostsPart = t[0];
}
return createProvider(providerUri, conf, origUrl, port, hostsPart);
}
return null;
}
private KeyProvider createProvider(URI providerUri, Configuration conf,
URL origUrl, int port, String hostsPart) throws IOException {
String[] hosts = hostsPart.split(";");
if (hosts.length == 1) {
return new KMSClientProvider(providerUri, conf);
} else {
KMSClientProvider[] providers = new KMSClientProvider[hosts.length];
for (int i = 0; i < hosts.length; i++) {
try {
providers[i] =
new KMSClientProvider(
new URI("kms", origUrl.getProtocol(), hosts[i], port,
origUrl.getPath(), null, null), conf);
} catch (URISyntaxException e) {
throw new IOException("Could not instantiate KMSProvider..", e);
}
}
return new LoadBalancingKMSClientProvider(providers, conf);
}
}
}
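  // Illustrative sketch (caller-side flow is an assumption): resolving a
  // multi-host kms:// URI through the KeyProviderFactory service loader.
  //
  //   Configuration conf = new Configuration();
  //   KeyProvider kp = KeyProviderFactory.get(
  //       URI.create("kms://https@kms01;kms02:9600/kms"), conf);
  //   // kp is a LoadBalancingKMSClientProvider wrapping one
  //   // KMSClientProvider per host, both on port 9600.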
public static <T> T checkNotNull(T o, String name)
throws IllegalArgumentException {
if (o == null) {
throw new IllegalArgumentException("Parameter '" + name +
"' cannot be null");
}
return o;
}
public static String checkNotEmpty(String s, String name)
throws IllegalArgumentException {
checkNotNull(s, name);
if (s.isEmpty()) {
throw new IllegalArgumentException("Parameter '" + name +
"' cannot be empty");
}
return s;
}
private String kmsUrl;
private SSLFactory sslFactory;
private ConnectionConfigurator configurator;
private DelegationTokenAuthenticatedURL.Token authToken;
private final int authRetry;
private final UserGroupInformation actualUgi;
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("KMSClientProvider[");
sb.append(kmsUrl).append("]");
return sb.toString();
}
/**
* This small class exists to set the timeout values for a connection
*/
private static class TimeoutConnConfigurator
implements ConnectionConfigurator {
private ConnectionConfigurator cc;
private int timeout;
/**
* Sets the timeout and wraps another connection configurator
* @param timeout - will set both connect and read timeouts - in seconds
* @param cc - another configurator to wrap - may be null
*/
public TimeoutConnConfigurator(int timeout, ConnectionConfigurator cc) {
this.timeout = timeout;
this.cc = cc;
}
/**
* Calls the wrapped configure() method, then sets timeouts
* @param conn the {@link HttpURLConnection} instance to configure.
* @return the connection
* @throws IOException
*/
@Override
public HttpURLConnection configure(HttpURLConnection conn)
throws IOException {
if (cc != null) {
conn = cc.configure(conn);
}
conn.setConnectTimeout(timeout * 1000); // conversion to milliseconds
conn.setReadTimeout(timeout * 1000);
return conn;
}
}
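  // Minimal sketch of how the configurator is applied (the url value is
  // illustrative): wrap an optional SSL configurator and enforce a 60s
  // connect/read timeout on every connection.
  //
  //   ConnectionConfigurator cc = new TimeoutConnConfigurator(60, sslFactory);
  //   HttpURLConnection conn =
  //       cc.configure((HttpURLConnection) url.openConnection());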
public KMSClientProvider(URI uri, Configuration conf) throws IOException {
super(conf);
kmsUrl = createServiceURL(extractKMSPath(uri));
if ("https".equalsIgnoreCase(new URL(kmsUrl).getProtocol())) {
sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
try {
sslFactory.init();
} catch (GeneralSecurityException ex) {
throw new IOException(ex);
}
}
int timeout = conf.getInt(TIMEOUT_ATTR, DEFAULT_TIMEOUT);
authRetry = conf.getInt(AUTH_RETRY, DEFAULT_AUTH_RETRY);
configurator = new TimeoutConnConfigurator(timeout, sslFactory);
encKeyVersionQueue =
new ValueQueue<KeyProviderCryptoExtension.EncryptedKeyVersion>(
conf.getInt(
CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE,
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT),
conf.getFloat(
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK,
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT),
conf.getInt(
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS,
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT),
conf.getInt(
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS,
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
new EncryptedQueueRefiller());
authToken = new DelegationTokenAuthenticatedURL.Token();
actualUgi =
(UserGroupInformation.getCurrentUser().getAuthenticationMethod() ==
UserGroupInformation.AuthenticationMethod.PROXY) ? UserGroupInformation
.getCurrentUser().getRealUser() : UserGroupInformation
.getCurrentUser();
}
  private static Path extractKMSPath(URI uri) throws IOException {
return ProviderUtils.unnestUri(uri);
}
private static String createServiceURL(Path path) throws IOException {
String str = new URL(path.toString()).toExternalForm();
if (str.endsWith("/")) {
str = str.substring(0, str.length() - 1);
}
return new URL(str + KMSRESTConstants.SERVICE_VERSION + "/").
toExternalForm();
}
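  // Example (assuming KMSRESTConstants.SERVICE_VERSION is "/v1"): a provider
  // path of "https://kms01:9600/kms/" normalizes to the service URL
  // "https://kms01:9600/kms/v1/".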
private URL createURL(String collection, String resource, String subResource,
Map<String, ?> parameters) throws IOException {
try {
StringBuilder sb = new StringBuilder();
sb.append(kmsUrl);
if (collection != null) {
sb.append(collection);
if (resource != null) {
sb.append("/").append(URLEncoder.encode(resource, UTF8));
if (subResource != null) {
sb.append("/").append(subResource);
}
}
}
URIBuilder uriBuilder = new URIBuilder(sb.toString());
if (parameters != null) {
for (Map.Entry<String, ?> param : parameters.entrySet()) {
Object value = param.getValue();
if (value instanceof String) {
uriBuilder.addParameter(param.getKey(), (String) value);
} else {
for (String s : (String[]) value) {
uriBuilder.addParameter(param.getKey(), s);
}
}
}
}
return uriBuilder.build().toURL();
} catch (URISyntaxException ex) {
throw new IOException(ex);
}
}
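  // Example of the resulting URL shape (the REST constant values shown are
  // assumptions based on the KMS REST API):
  //
  //   createURL("key", "k1", "_eek", {eek_op=generate, num_keys=3})
  //     -> <kmsUrl>key/k1/_eek?eek_op=generate&num_keys=3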
private HttpURLConnection configureConnection(HttpURLConnection conn)
throws IOException {
if (sslFactory != null) {
HttpsURLConnection httpsConn = (HttpsURLConnection) conn;
try {
httpsConn.setSSLSocketFactory(sslFactory.createSSLSocketFactory());
} catch (GeneralSecurityException ex) {
throw new IOException(ex);
}
httpsConn.setHostnameVerifier(sslFactory.getHostnameVerifier());
}
return conn;
}
private HttpURLConnection createConnection(final URL url, String method)
throws IOException {
HttpURLConnection conn;
try {
// if current UGI is different from UGI at constructor time, behave as
// proxyuser
UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
final String doAsUser = (currentUgi.getAuthenticationMethod() ==
UserGroupInformation.AuthenticationMethod.PROXY)
? currentUgi.getShortUserName() : null;
// creating the HTTP connection using the current UGI at constructor time
conn = actualUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
@Override
public HttpURLConnection run() throws Exception {
DelegationTokenAuthenticatedURL authUrl =
new DelegationTokenAuthenticatedURL(configurator);
return authUrl.openConnection(url, authToken, doAsUser);
}
});
} catch (IOException ex) {
throw ex;
} catch (UndeclaredThrowableException ex) {
throw new IOException(ex.getUndeclaredThrowable());
} catch (Exception ex) {
throw new IOException(ex);
}
conn.setUseCaches(false);
conn.setRequestMethod(method);
if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) {
conn.setDoOutput(true);
}
conn = configureConnection(conn);
return conn;
}
private <T> T call(HttpURLConnection conn, Map jsonOutput,
int expectedResponse, Class<T> klass) throws IOException {
return call(conn, jsonOutput, expectedResponse, klass, authRetry);
}
private <T> T call(HttpURLConnection conn, Map jsonOutput,
int expectedResponse, Class<T> klass, int authRetryCount)
throws IOException {
T ret = null;
try {
if (jsonOutput != null) {
writeJson(jsonOutput, conn.getOutputStream());
}
} catch (IOException ex) {
conn.getInputStream().close();
throw ex;
}
if ((conn.getResponseCode() == HttpURLConnection.HTTP_FORBIDDEN
&& (conn.getResponseMessage().equals(ANONYMOUS_REQUESTS_DISALLOWED) ||
conn.getResponseMessage().contains(INVALID_SIGNATURE)))
|| conn.getResponseCode() == HttpURLConnection.HTTP_UNAUTHORIZED) {
      // Ideally, this should happen only when there is an authentication
      // failure. Unfortunately, the AuthenticationFilter returns 403 when it
      // cannot authenticate (since a 401 requires the server to send a
      // WWW-Authenticate header as well).
KMSClientProvider.this.authToken =
new DelegationTokenAuthenticatedURL.Token();
if (authRetryCount > 0) {
String contentType = conn.getRequestProperty(CONTENT_TYPE);
String requestMethod = conn.getRequestMethod();
URL url = conn.getURL();
conn = createConnection(url, requestMethod);
conn.setRequestProperty(CONTENT_TYPE, contentType);
return call(conn, jsonOutput, expectedResponse, klass,
authRetryCount - 1);
}
}
try {
AuthenticatedURL.extractToken(conn, authToken);
} catch (AuthenticationException e) {
      // Ignore the AuthExceptions, since we are just using this method to
      // extract and set the authToken. (Workaround until AuthenticatedURL is
      // fixed to set authToken post initialization.)
}
HttpExceptionUtils.validateResponse(conn, expectedResponse);
if (conn.getContentType() != null
&& conn.getContentType().trim().toLowerCase()
.startsWith(APPLICATION_JSON_MIME)
&& klass != null) {
ObjectMapper mapper = new ObjectMapper();
InputStream is = null;
try {
is = conn.getInputStream();
ret = mapper.readValue(is, klass);
} catch (IOException ex) {
if (is != null) {
is.close();
}
throw ex;
} finally {
if (is != null) {
is.close();
}
}
}
return ret;
}
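  // Retry behavior sketch: with the default AUTH_RETRY of 1, a request that
  // comes back 401, or 403 with an auth-related message, resets the cached
  // auth token and is re-issued exactly once on a fresh connection before
  // the response is validated for the caller.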
public static class KMSKeyVersion extends KeyVersion {
public KMSKeyVersion(String keyName, String versionName, byte[] material) {
super(keyName, versionName, material);
}
}
@Override
public KeyVersion getKeyVersion(String versionName) throws IOException {
checkNotEmpty(versionName, "versionName");
URL url = createURL(KMSRESTConstants.KEY_VERSION_RESOURCE,
versionName, null, null);
HttpURLConnection conn = createConnection(url, HTTP_GET);
Map response = call(conn, null, HttpURLConnection.HTTP_OK, Map.class);
return parseJSONKeyVersion(response);
}
@Override
public KeyVersion getCurrentKey(String name) throws IOException {
checkNotEmpty(name, "name");
URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name,
KMSRESTConstants.CURRENT_VERSION_SUB_RESOURCE, null);
HttpURLConnection conn = createConnection(url, HTTP_GET);
Map response = call(conn, null, HttpURLConnection.HTTP_OK, Map.class);
return parseJSONKeyVersion(response);
}
@Override
@SuppressWarnings("unchecked")
public List<String> getKeys() throws IOException {
URL url = createURL(KMSRESTConstants.KEYS_NAMES_RESOURCE, null, null,
null);
HttpURLConnection conn = createConnection(url, HTTP_GET);
List response = call(conn, null, HttpURLConnection.HTTP_OK, List.class);
return (List<String>) response;
}
public static class KMSMetadata extends Metadata {
public KMSMetadata(String cipher, int bitLength, String description,
Map<String, String> attributes, Date created, int versions) {
super(cipher, bitLength, description, attributes, created, versions);
}
}
  // breaking keyNames into sets to keep the resulting URL under 2000 chars
private List<String[]> createKeySets(String[] keyNames) {
List<String[]> list = new ArrayList<String[]>();
List<String> batch = new ArrayList<String>();
int batchLen = 0;
for (String name : keyNames) {
int additionalLen = KMSRESTConstants.KEY.length() + 1 + name.length();
batchLen += additionalLen;
// topping at 1500 to account for initial URL and encoded names
if (batchLen > 1500) {
list.add(batch.toArray(new String[batch.size()]));
batch = new ArrayList<String>();
batchLen = additionalLen;
}
batch.add(name);
}
if (!batch.isEmpty()) {
list.add(batch.toArray(new String[batch.size()]));
}
return list;
}
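  // Worked example (lengths are illustrative, assuming KMSRESTConstants.KEY
  // is "key"): each name "mykey" adds 3 + 1 + 5 = 9 chars to the running
  // batch length; once a batch would exceed 1500 chars it is closed and the
  // current name starts a new batch, keeping each request URL safely under
  // the ~2000-char practical limit after encoding.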
@Override
@SuppressWarnings("unchecked")
public Metadata[] getKeysMetadata(String ... keyNames) throws IOException {
List<Metadata> keysMetadata = new ArrayList<Metadata>();
List<String[]> keySets = createKeySets(keyNames);
for (String[] keySet : keySets) {
if (keyNames.length > 0) {
Map<String, Object> queryStr = new HashMap<String, Object>();
queryStr.put(KMSRESTConstants.KEY, keySet);
URL url = createURL(KMSRESTConstants.KEYS_METADATA_RESOURCE, null,
null, queryStr);
HttpURLConnection conn = createConnection(url, HTTP_GET);
List<Map> list = call(conn, null, HttpURLConnection.HTTP_OK, List.class);
for (Map map : list) {
keysMetadata.add(parseJSONMetadata(map));
}
}
}
return keysMetadata.toArray(new Metadata[keysMetadata.size()]);
}
private KeyVersion createKeyInternal(String name, byte[] material,
Options options)
throws NoSuchAlgorithmException, IOException {
checkNotEmpty(name, "name");
checkNotNull(options, "options");
Map<String, Object> jsonKey = new HashMap<String, Object>();
jsonKey.put(KMSRESTConstants.NAME_FIELD, name);
jsonKey.put(KMSRESTConstants.CIPHER_FIELD, options.getCipher());
jsonKey.put(KMSRESTConstants.LENGTH_FIELD, options.getBitLength());
if (material != null) {
jsonKey.put(KMSRESTConstants.MATERIAL_FIELD,
Base64.encodeBase64String(material));
}
if (options.getDescription() != null) {
jsonKey.put(KMSRESTConstants.DESCRIPTION_FIELD,
options.getDescription());
}
if (options.getAttributes() != null && !options.getAttributes().isEmpty()) {
jsonKey.put(KMSRESTConstants.ATTRIBUTES_FIELD, options.getAttributes());
}
URL url = createURL(KMSRESTConstants.KEYS_RESOURCE, null, null, null);
HttpURLConnection conn = createConnection(url, HTTP_POST);
conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
Map response = call(conn, jsonKey, HttpURLConnection.HTTP_CREATED,
Map.class);
return parseJSONKeyVersion(response);
}
@Override
public KeyVersion createKey(String name, Options options)
throws NoSuchAlgorithmException, IOException {
return createKeyInternal(name, null, options);
}
@Override
public KeyVersion createKey(String name, byte[] material, Options options)
throws IOException {
checkNotNull(material, "material");
try {
return createKeyInternal(name, material, options);
} catch (NoSuchAlgorithmException ex) {
throw new RuntimeException("It should not happen", ex);
}
}
private KeyVersion rollNewVersionInternal(String name, byte[] material)
throws NoSuchAlgorithmException, IOException {
checkNotEmpty(name, "name");
Map<String, String> jsonMaterial = new HashMap<String, String>();
if (material != null) {
jsonMaterial.put(KMSRESTConstants.MATERIAL_FIELD,
Base64.encodeBase64String(material));
}
URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name, null, null);
HttpURLConnection conn = createConnection(url, HTTP_POST);
conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
Map response = call(conn, jsonMaterial,
HttpURLConnection.HTTP_OK, Map.class);
KeyVersion keyVersion = parseJSONKeyVersion(response);
encKeyVersionQueue.drain(name);
return keyVersion;
}
@Override
public KeyVersion rollNewVersion(String name)
throws NoSuchAlgorithmException, IOException {
return rollNewVersionInternal(name, null);
}
@Override
public KeyVersion rollNewVersion(String name, byte[] material)
throws IOException {
checkNotNull(material, "material");
try {
return rollNewVersionInternal(name, material);
} catch (NoSuchAlgorithmException ex) {
throw new RuntimeException("It should not happen", ex);
}
}
@Override
public EncryptedKeyVersion generateEncryptedKey(
String encryptionKeyName) throws IOException, GeneralSecurityException {
try {
return encKeyVersionQueue.getNext(encryptionKeyName);
} catch (ExecutionException e) {
if (e.getCause() instanceof SocketTimeoutException) {
throw (SocketTimeoutException)e.getCause();
}
throw new IOException(e);
}
}
@SuppressWarnings("rawtypes")
@Override
public KeyVersion decryptEncryptedKey(
EncryptedKeyVersion encryptedKeyVersion) throws IOException,
GeneralSecurityException {
checkNotNull(encryptedKeyVersion.getEncryptionKeyVersionName(),
"versionName");
checkNotNull(encryptedKeyVersion.getEncryptedKeyIv(), "iv");
Preconditions.checkArgument(
encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
.equals(KeyProviderCryptoExtension.EEK),
"encryptedKey version name must be '%s', is '%s'",
KeyProviderCryptoExtension.EEK,
encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
);
checkNotNull(encryptedKeyVersion.getEncryptedKeyVersion(), "encryptedKey");
Map<String, String> params = new HashMap<String, String>();
params.put(KMSRESTConstants.EEK_OP, KMSRESTConstants.EEK_DECRYPT);
Map<String, Object> jsonPayload = new HashMap<String, Object>();
jsonPayload.put(KMSRESTConstants.NAME_FIELD,
encryptedKeyVersion.getEncryptionKeyName());
jsonPayload.put(KMSRESTConstants.IV_FIELD, Base64.encodeBase64String(
encryptedKeyVersion.getEncryptedKeyIv()));
jsonPayload.put(KMSRESTConstants.MATERIAL_FIELD, Base64.encodeBase64String(
encryptedKeyVersion.getEncryptedKeyVersion().getMaterial()));
URL url = createURL(KMSRESTConstants.KEY_VERSION_RESOURCE,
encryptedKeyVersion.getEncryptionKeyVersionName(),
KMSRESTConstants.EEK_SUB_RESOURCE, params);
HttpURLConnection conn = createConnection(url, HTTP_POST);
conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
Map response =
call(conn, jsonPayload, HttpURLConnection.HTTP_OK, Map.class);
return parseJSONKeyVersion(response);
}
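  // Round-trip usage sketch (caller-side flow; the key name is an
  // assumption):
  //
  //   EncryptedKeyVersion eek = kms.generateEncryptedKey("mykey");
  //   // eek is safe to persist; decryption requires another KMS call:
  //   KeyProvider.KeyVersion dek = kms.decryptEncryptedKey(eek);
  //   byte[] material = dek.getMaterial();  // decrypted key bytes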
@Override
public List<KeyVersion> getKeyVersions(String name) throws IOException {
checkNotEmpty(name, "name");
URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name,
KMSRESTConstants.VERSIONS_SUB_RESOURCE, null);
HttpURLConnection conn = createConnection(url, HTTP_GET);
List response = call(conn, null, HttpURLConnection.HTTP_OK, List.class);
List<KeyVersion> versions = null;
if (!response.isEmpty()) {
versions = new ArrayList<KeyVersion>();
for (Object obj : response) {
versions.add(parseJSONKeyVersion((Map) obj));
}
}
return versions;
}
@Override
public Metadata getMetadata(String name) throws IOException {
checkNotEmpty(name, "name");
URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name,
KMSRESTConstants.METADATA_SUB_RESOURCE, null);
HttpURLConnection conn = createConnection(url, HTTP_GET);
Map response = call(conn, null, HttpURLConnection.HTTP_OK, Map.class);
return parseJSONMetadata(response);
}
@Override
public void deleteKey(String name) throws IOException {
checkNotEmpty(name, "name");
URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name, null, null);
HttpURLConnection conn = createConnection(url, HTTP_DELETE);
call(conn, null, HttpURLConnection.HTTP_OK, null);
}
@Override
public void flush() throws IOException {
// NOP
// the client does not keep any local state, thus flushing is not required
// because of the client.
// the server should not keep in memory state on behalf of clients either.
}
@Override
public void warmUpEncryptedKeys(String... keyNames)
throws IOException {
try {
encKeyVersionQueue.initializeQueuesForKeys(keyNames);
} catch (ExecutionException e) {
throw new IOException(e);
}
}
@Override
public void drain(String keyName) {
encKeyVersionQueue.drain(keyName);
}
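  // Cache warm-up sketch: pre-filling the EEK queues for known keys avoids a
  // synchronous KMS round trip on the first generateEncryptedKey() call.
  //
  //   kms.warmUpEncryptedKeys("key1", "key2");
  //   EncryptedKeyVersion eek = kms.generateEncryptedKey("key1"); // cache hit
  //   kms.drain("key1");  // e.g. after rolling "key1" to a new version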
@VisibleForTesting
public int getEncKeyQueueSize(String keyName) throws IOException {
try {
return encKeyVersionQueue.getSize(keyName);
} catch (ExecutionException e) {
throw new IOException(e);
}
}
@Override
public Token<?>[] addDelegationTokens(final String renewer,
Credentials credentials) throws IOException {
Token<?>[] tokens = null;
Text dtService = getDelegationTokenService();
Token<?> token = credentials.getToken(dtService);
if (token == null) {
final URL url = createURL(null, null, null, null);
final DelegationTokenAuthenticatedURL authUrl =
new DelegationTokenAuthenticatedURL(configurator);
try {
        // 'actualUgi' is the UGI of the user creating the client.
        // It is possible that the creator of the KMSClientProvider
        // calls this method on behalf of a proxyUser (the doAsUser),
        // in which case this call has to be made as the proxy user.
UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
final String doAsUser = (currentUgi.getAuthenticationMethod() ==
UserGroupInformation.AuthenticationMethod.PROXY)
? currentUgi.getShortUserName() : null;
token = actualUgi.doAs(new PrivilegedExceptionAction<Token<?>>() {
@Override
public Token<?> run() throws Exception {
            // Not using the cached token here; creating a new token
            // every time.
return authUrl.getDelegationToken(url,
new DelegationTokenAuthenticatedURL.Token(), renewer, doAsUser);
}
});
if (token != null) {
credentials.addToken(token.getService(), token);
tokens = new Token<?>[] { token };
} else {
throw new IOException("Got NULL as delegation token");
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (Exception e) {
throw new IOException(e);
}
}
return tokens;
}
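  // Usage sketch (caller-side flow; the renewer name is an assumption):
  //
  //   Credentials creds = new Credentials();
  //   Token<?>[] tokens = kms.addDelegationTokens("yarn", creds);
  //   // tokens[0] is a "kms-dt" token, also stored in creds under the
  //   // KMS host:port service name for later use by remote tasks.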
private Text getDelegationTokenService() throws IOException {
URL url = new URL(kmsUrl);
InetSocketAddress addr = new InetSocketAddress(url.getHost(),
url.getPort());
Text dtService = SecurityUtil.buildTokenService(addr);
return dtService;
}
/**
   * Shuts down the ValueQueue executor threads.
*/
@Override
public void close() throws IOException {
try {
encKeyVersionQueue.shutdown();
} catch (Exception e) {
throw new IOException(e);
} finally {
if (sslFactory != null) {
sslFactory.destroy();
}
}
}
@VisibleForTesting
String getKMSUrl() {
return kmsUrl;
}
}
| 35,027 | 36.868108 | 98 |
java
|