repo (stringlengths 1-191, ⌀) | file (stringlengths 23-351) | code (stringlengths 0-5.32M) | file_length (int64 0-5.32M) | avg_line_length (float64 0-2.9k) | max_line_length (int64 0-288k) | extension_type (stringclasses, 1 value)
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRolloverSignerSecretProvider.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.util;
import org.junit.Assert;
import org.junit.Test;
public class TestRolloverSignerSecretProvider {
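/**
 * Verifies that the provider exposes a new current secret after each rollover
 * interval and always keeps the previous secret as the second entry.
 */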
@Test
public void testGetAndRollSecrets() throws Exception {
long rolloverFrequency = 15 * 1000; // rollover every 15 sec
byte[] secret1 = "doctor".getBytes();
byte[] secret2 = "who".getBytes();
byte[] secret3 = "tardis".getBytes();
TRolloverSignerSecretProvider secretProvider =
new TRolloverSignerSecretProvider(
new byte[][]{secret1, secret2, secret3});
try {
secretProvider.init(null, null, rolloverFrequency);
byte[] currentSecret = secretProvider.getCurrentSecret();
byte[][] allSecrets = secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret1, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret1, allSecrets[0]);
Assert.assertNull(allSecrets[1]);
Thread.sleep(rolloverFrequency + 2000);
currentSecret = secretProvider.getCurrentSecret();
allSecrets = secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret2, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret2, allSecrets[0]);
Assert.assertArrayEquals(secret1, allSecrets[1]);
Thread.sleep(rolloverFrequency + 2000);
currentSecret = secretProvider.getCurrentSecret();
allSecrets = secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret3, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret3, allSecrets[0]);
Assert.assertArrayEquals(secret2, allSecrets[1]);
Thread.sleep(rolloverFrequency + 2000);
} finally {
secretProvider.destroy();
}
}
class TRolloverSignerSecretProvider extends RolloverSignerSecretProvider {
private byte[][] newSecretSequence;
private int newSecretSequenceIndex;
public TRolloverSignerSecretProvider(byte[][] newSecretSequence)
throws Exception {
super();
this.newSecretSequence = newSecretSequence;
this.newSecretSequenceIndex = 0;
}
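/**
 * Returns the next secret from the predetermined sequence so the rollover
 * order is deterministic in the test.
 */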
@Override
protected byte[] generateNewSecret() {
return newSecretSequence[newSecretSequenceIndex++];
}
}
}
| 2,885 | 35.075 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java
|
package org.apache.hadoop.security.authentication.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.hadoop.security.authentication.KerberosTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.Assert;
public class TestKerberosName {
@Before
public void setUp() throws Exception {
System.setProperty("java.security.krb5.realm", KerberosTestUtils.getRealm());
System.setProperty("java.security.krb5.kdc", "localhost:88");
String rules =
"RULE:[1:$1@$0](.*@YAHOO\\.COM)s/@.*//\n" +
"RULE:[2:$1](johndoe)s/^.*$/guest/\n" +
"RULE:[2:$1;$2](^.*;admin$)s/;admin$//\n" +
"RULE:[2:$2](root)\n" +
"DEFAULT";
KerberosName.setRules(rules);
KerberosName.printRules();
}
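/**
 * Applies the configured KerberosName rules to the given principal and
 * asserts the resulting short name.
 */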
private void checkTranslation(String from, String to) throws Exception {
System.out.println("Translate " + from);
KerberosName nm = new KerberosName(from);
String simple = nm.getShortName();
System.out.println("to " + simple);
Assert.assertEquals("short name incorrect", to, simple);
}
@Test
public void testRules() throws Exception {
checkTranslation("omalley@" + KerberosTestUtils.getRealm(), "omalley");
checkTranslation("hdfs/10.0.0.1@" + KerberosTestUtils.getRealm(), "hdfs");
checkTranslation("[email protected]", "oom");
checkTranslation("johndoe/[email protected]", "guest");
checkTranslation("joe/[email protected]", "joe");
checkTranslation("joe/[email protected]", "root");
}
private void checkBadName(String name) {
System.out.println("Checking " + name + " to ensure it is bad.");
try {
new KerberosName(name);
Assert.fail("didn't get exception for " + name);
} catch (IllegalArgumentException iae) {
// PASS
}
}
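/**
 * Asserts that getShortName() fails because no configured rule matches the
 * principal.
 */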
private void checkBadTranslation(String from) {
System.out.println("Checking bad translation for " + from);
KerberosName nm = new KerberosName(from);
try {
nm.getShortName();
Assert.fail("didn't get exception for " + from);
} catch (IOException ie) {
// PASS
}
}
@Test
public void testAntiPatterns() throws Exception {
checkBadName("owen/owen/[email protected]");
checkBadName("owen@foo/bar.com");
checkBadTranslation("[email protected]");
checkBadTranslation("root/[email protected]");
}
@Test
public void testToLowerCase() throws Exception {
String rules =
"RULE:[1:$1]/L\n" +
"RULE:[2:$1]/L\n" +
"RULE:[2:$1;$2](^.*;admin$)s/;admin$///L\n" +
"RULE:[2:$1;$2](^.*;guest$)s/;guest$//g/L\n" +
"DEFAULT";
KerberosName.setRules(rules);
KerberosName.printRules();
checkTranslation("[email protected]", "joe");
checkTranslation("Joe/[email protected]", "joe");
checkTranslation("Joe/[email protected]", "joe");
checkTranslation("Joe/[email protected]", "joe");
}
@After
public void clear() {
System.clearProperty("java.security.krb5.realm");
System.clearProperty("java.security.krb5.kdc");
}
}
| 3,792 | 31.698276 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authentication.util;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.security.interfaces.RSAPublicKey;
import javax.servlet.ServletException;
import org.junit.Test;
public class TestCertificateUtil {
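/**
 * parseRSAPublicKey() expects only the Base64 certificate body, so passing a
 * full PEM with BEGIN/END lines should be rejected with a "PEM header" error.
 */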
@Test
public void testInvalidPEMWithHeaderAndFooter() throws Exception {
String pem = "-----BEGIN CERTIFICATE-----\n"
+ "MIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
+ "CwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRvb3AxDTALBgNVBAsTBFRl"
+ "c3QxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNTAxMDIyMTE5MjRaFw0xNjAxMDIyMTE5MjRaMF8x"
+ "CzAJBgNVBAYTAlVTMQ0wCwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRv"
+ "b3AxDTALBgNVBAsTBFRlc3QxEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOB"
+ "jQAwgYkCgYEAwpfpLdi7dWTHNzETt+L7618/dWUQFb/C7o1jIxFgbKOVIB6d5YmvUbJck5PYxFkz"
+ "C25fmU5H71WGOI1Kle5TFDmIo+hqh5xqu1YNRZz9i6D94g+2AyYr9BpvH4ZfdHs7r9AU7c3kq68V"
+ "7OPuuaHb25J8isiOyA3RiWuJGQlXTdkCAwEAATANBgkqhkiG9w0BAQUFAAOBgQAdRUyCUqE9sdim"
+ "Fbll9BuZDKV16WXeWGq+kTd7ETe7l0fqXjq5EnrifOai0L/pXwVvS2jrFkKQRlRxRGUNaeEBZ2Wy"
+ "9aTyR+HGHCfvwoCegc9rAVw/DLaRriSO/jnEXzYK6XLVKH+hx5UXrJ7Oyc7JjZUc3g9kCWORThCX"
+ "Mzc1xA==" + "\n-----END CERTIFICATE-----";
try {
CertificateUtil.parseRSAPublicKey(pem);
fail("Should not have thrown ServletException");
} catch (ServletException se) {
assertTrue(se.getMessage().contains("PEM header"));
}
}
@Test
public void testCorruptPEM() throws Exception {
String pem = "MIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
+ "CwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRvb3AxDTALBgNVBAsTBFRl"
+ "c3QxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNTAxMDIyMTE5MjRaFw0xNjAxMDIyMTE5MjRaMF8x"
+ "CzAJBgNVBAYTAlVTMQ0wCwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRv"
+ "b3AxDTALBgNVBAsTBFRlc3QxEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOB"
+ "jQAwgYkCgYEAwpfpLdi7dWTHNzETt+L7618/dWUQFb/C7o1jIxFgbKOVIB6d5YmvUbJck5PYxFkz"
+ "C25fmU5H71WGOI1Kle5TFDmIo+hqh5xqu1YNRZz9i6D94g+2AyYr9BpvH4ZfdHs7r9AU7c3kq68V"
+ "7OPuuaHb25J8isiOyA3RiWuJGQlXTdkCAwEAATANBgkqhkiG9w0BAQUFAAOBgQAdRUyCUqE9sdim"
+ "Fbll9BuZDKV16WXeWGq+kTd7ETe7l0fqXjq5EnrifOai0L/pXwVvS2jrFkKQRlRxRGUNaeEBZ2Wy"
+ "9aTyR+HGHCfvwoCegc9rAVw/DLaRriSO/jnEXzYK6XLVKH+hx5UXrJ7Oyc7JjZUc3g9kCWORThCX"
+ "Mzc1xA++";
try {
CertificateUtil.parseRSAPublicKey(pem);
fail("Should not have thrown ServletException");
} catch (ServletException se) {
assertTrue(se.getMessage().contains("corrupt"));
}
}
@Test
public void testValidPEM() throws Exception {
String pem = "MIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
+ "CwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRvb3AxDTALBgNVBAsTBFRl"
+ "c3QxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNTAxMDIyMTE5MjRaFw0xNjAxMDIyMTE5MjRaMF8x"
+ "CzAJBgNVBAYTAlVTMQ0wCwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRv"
+ "b3AxDTALBgNVBAsTBFRlc3QxEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOB"
+ "jQAwgYkCgYEAwpfpLdi7dWTHNzETt+L7618/dWUQFb/C7o1jIxFgbKOVIB6d5YmvUbJck5PYxFkz"
+ "C25fmU5H71WGOI1Kle5TFDmIo+hqh5xqu1YNRZz9i6D94g+2AyYr9BpvH4ZfdHs7r9AU7c3kq68V"
+ "7OPuuaHb25J8isiOyA3RiWuJGQlXTdkCAwEAATANBgkqhkiG9w0BAQUFAAOBgQAdRUyCUqE9sdim"
+ "Fbll9BuZDKV16WXeWGq+kTd7ETe7l0fqXjq5EnrifOai0L/pXwVvS2jrFkKQRlRxRGUNaeEBZ2Wy"
+ "9aTyR+HGHCfvwoCegc9rAVw/DLaRriSO/jnEXzYK6XLVKH+hx5UXrJ7Oyc7JjZUc3g9kCWORThCX"
+ "Mzc1xA==";
try {
RSAPublicKey pk = CertificateUtil.parseRSAPublicKey(pem);
assertTrue(pk != null);
assertTrue(pk.getAlgorithm().equals("RSA"));
} catch (ServletException se) {
fail("Should not have thrown ServletException");
}
}
}
| 4,802 | 48.515464 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.security.authentication.util;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.regex.Pattern;
import org.apache.directory.server.kerberos.shared.keytab.Keytab;
import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry;
import org.apache.directory.shared.kerberos.KerberosTime;
import org.apache.directory.shared.kerberos.codec.types.EncryptionType;
import org.apache.directory.shared.kerberos.components.EncryptionKey;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
public class TestKerberosUtil {
static String testKeytab = "test.keytab";
static String[] testPrincipals = new String[]{
"HTTP@testRealm",
"test/testhost@testRealm",
"HTTP/testhost@testRealm",
"HTTP1/testhost@testRealm",
"HTTP/testhostanother@testRealm"
};
@After
public void deleteKeytab() {
File keytabFile = new File(testKeytab);
if (keytabFile.exists()){
keytabFile.delete();
}
}
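/**
 * getServicePrincipal() should fall back to the lower-cased local hostname
 * when the hostname argument is null, empty, or 0.0.0.0, and should
 * lower-case any hostname that is supplied.
 */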
@Test
public void testGetServerPrincipal() throws IOException {
String service = "TestKerberosUtil";
String localHostname = KerberosUtil.getLocalHostName();
String testHost = "FooBar";
// send null hostname
Assert.assertEquals("When no hostname is sent",
service + "/" + localHostname.toLowerCase(Locale.ENGLISH),
KerberosUtil.getServicePrincipal(service, null));
// send empty hostname
Assert.assertEquals("When empty hostname is sent",
service + "/" + localHostname.toLowerCase(Locale.ENGLISH),
KerberosUtil.getServicePrincipal(service, ""));
// send 0.0.0.0 hostname
Assert.assertEquals("When 0.0.0.0 hostname is sent",
service + "/" + localHostname.toLowerCase(Locale.ENGLISH),
KerberosUtil.getServicePrincipal(service, "0.0.0.0"));
// send uppercase hostname
Assert.assertEquals("When uppercase hostname is sent",
service + "/" + testHost.toLowerCase(Locale.ENGLISH),
KerberosUtil.getServicePrincipal(service, testHost));
// send lowercase hostname
Assert.assertEquals("When lowercase hostname is sent",
service + "/" + testHost.toLowerCase(Locale.ENGLISH),
KerberosUtil.getServicePrincipal(
service, testHost.toLowerCase(Locale.ENGLISH)));
}
@Test
public void testGetPrincipalNamesMissingKeytab() {
try {
KerberosUtil.getPrincipalNames(testKeytab);
Assert.fail("Exception should have been thrown");
} catch (IOException e) {
//expects exception
}
}
@Test
public void testGetPrincipalNamesMissingPattern() throws IOException {
createKeyTab(testKeytab, new String[]{"test/testhost@testRealm"});
try {
KerberosUtil.getPrincipalNames(testKeytab, null);
Assert.fail("Exception should have been thrown");
} catch (Exception e) {
//expects exception
}
}
@Test
public void testGetPrincipalNamesFromKeytab() throws IOException {
createKeyTab(testKeytab, testPrincipals);
// read all principals in the keytab file
String[] principals = KerberosUtil.getPrincipalNames(testKeytab);
Assert.assertNotNull("principals cannot be null", principals);
int expectedSize = 0;
List<String> principalList = Arrays.asList(principals);
for (String principal : testPrincipals) {
Assert.assertTrue("missing principal "+principal,
principalList.contains(principal));
expectedSize++;
}
Assert.assertEquals(expectedSize, principals.length);
}
@Test
public void testGetPrincipalNamesFromKeytabWithPattern() throws IOException {
createKeyTab(testKeytab, testPrincipals);
// read the keytab file
// look for principals with HTTP as the first part
Pattern httpPattern = Pattern.compile("HTTP/.*");
String[] httpPrincipals =
KerberosUtil.getPrincipalNames(testKeytab, httpPattern);
Assert.assertNotNull("principals cannot be null", httpPrincipals);
int expectedSize = 0;
List<String> httpPrincipalList = Arrays.asList(httpPrincipals);
for (String principal : testPrincipals) {
if (httpPattern.matcher(principal).matches()) {
Assert.assertTrue("missing principal "+principal,
httpPrincipalList.contains(principal));
expectedSize++;
}
}
Assert.assertEquals(expectedSize, httpPrincipals.length);
}
private void createKeyTab(String fileName, String[] principalNames)
throws IOException {
//create a test keytab file
List<KeytabEntry> lstEntries = new ArrayList<KeytabEntry>();
for (String principal : principalNames){
// create 3 versions of the key to ensure methods don't return
// duplicate principals
for (int kvno=1; kvno <= 3; kvno++) {
EncryptionKey key = new EncryptionKey(
EncryptionType.UNKNOWN, "samplekey1".getBytes(), kvno);
KeytabEntry keytabEntry = new KeytabEntry(
principal, 1 , new KerberosTime(), (byte) 1, key);
lstEntries.add(keytabEntry);
}
}
Keytab keytab = Keytab.getInstance();
keytab.setEntries(lstEntries);
keytab.write(new File(testKeytab));
}
}
| 6,063 | 36.432099 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.util;
import java.util.Random;
import org.junit.Assert;
import org.junit.Test;
public class TestRandomSignerSecretProvider {
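/**
 * The provider is constructed with a known seed so its RNG output, and
 * therefore each rolled-over secret, can be predicted and asserted.
 */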
@Test
public void testGetAndRollSecrets() throws Exception {
long rolloverFrequency = 15 * 1000; // rollover every 15 sec
// use the same seed so we can predict the RNG
long seed = System.currentTimeMillis();
Random rand = new Random(seed);
byte[] secret1 = Long.toString(rand.nextLong()).getBytes();
byte[] secret2 = Long.toString(rand.nextLong()).getBytes();
byte[] secret3 = Long.toString(rand.nextLong()).getBytes();
RandomSignerSecretProvider secretProvider =
new RandomSignerSecretProvider(seed);
try {
secretProvider.init(null, null, rolloverFrequency);
byte[] currentSecret = secretProvider.getCurrentSecret();
byte[][] allSecrets = secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret1, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret1, allSecrets[0]);
Assert.assertNull(allSecrets[1]);
Thread.sleep(rolloverFrequency + 2000);
currentSecret = secretProvider.getCurrentSecret();
allSecrets = secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret2, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret2, allSecrets[0]);
Assert.assertArrayEquals(secret1, allSecrets[1]);
Thread.sleep(rolloverFrequency + 2000);
currentSecret = secretProvider.getCurrentSecret();
allSecrets = secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret3, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret3, allSecrets[0]);
Assert.assertArrayEquals(secret2, allSecrets[1]);
Thread.sleep(rolloverFrequency + 2000);
} finally {
secretProvider.destroy();
}
}
}
| 2,561 | 39.03125 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.util;
import java.nio.charset.Charset;
import java.util.Properties;
import javax.servlet.ServletContext;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
/**
* A SignerSecretProvider that simply creates a secret based on a given String.
*/
@InterfaceStability.Unstable
@VisibleForTesting
class StringSignerSecretProvider extends SignerSecretProvider {
private byte[] secret;
private byte[][] secrets;
public StringSignerSecretProvider() {}
@Override
public void init(Properties config, ServletContext servletContext,
long tokenValidity) throws Exception {
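// The secret is read verbatim from the AuthenticationFilter.SIGNATURE_SECRET property.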
String signatureSecret = config.getProperty(
AuthenticationFilter.SIGNATURE_SECRET, null);
secret = signatureSecret.getBytes(Charset.forName("UTF-8"));
secrets = new byte[][]{secret};
}
@Override
public byte[] getCurrentSecret() {
return secret;
}
@Override
public byte[][] getAllSecrets() {
return secrets;
}
}
| 1,790 | 30.982143 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.util;
import java.util.Arrays;
import java.util.Properties;
import java.util.Random;
import javax.servlet.ServletContext;
import org.apache.curator.test.TestingServer;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
public class TestZKSignerSecretProvider {
private TestingServer zkServer;
@Before
public void setup() throws Exception {
zkServer = new TestingServer();
}
@After
public void teardown() throws Exception {
if (zkServer != null) {
zkServer.stop();
zkServer.close();
}
}
@Test
// Test just one ZKSignerSecretProvider to verify that it works in the
// simplest case
public void testOne() throws Exception {
long rolloverFrequency = 15 * 1000; // rollover every 15 sec
// use the same seed so we can predict the RNG
long seed = System.currentTimeMillis();
Random rand = new Random(seed);
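// Note the draw order: the provider's first random value becomes the secret
// used after the first rollover, and its second value is the initial secret,
// so secret2 is generated before secret1 here.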
byte[] secret2 = Long.toString(rand.nextLong()).getBytes();
byte[] secret1 = Long.toString(rand.nextLong()).getBytes();
byte[] secret3 = Long.toString(rand.nextLong()).getBytes();
ZKSignerSecretProvider secretProvider = new ZKSignerSecretProvider(seed);
Properties config = new Properties();
config.setProperty(
ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING,
zkServer.getConnectString());
config.setProperty(ZKSignerSecretProvider.ZOOKEEPER_PATH,
"/secret");
try {
secretProvider.init(config, getDummyServletContext(), rolloverFrequency);
byte[] currentSecret = secretProvider.getCurrentSecret();
byte[][] allSecrets = secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret1, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret1, allSecrets[0]);
Assert.assertNull(allSecrets[1]);
Thread.sleep((rolloverFrequency + 2000));
currentSecret = secretProvider.getCurrentSecret();
allSecrets = secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret2, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret2, allSecrets[0]);
Assert.assertArrayEquals(secret1, allSecrets[1]);
Thread.sleep((rolloverFrequency + 2000));
currentSecret = secretProvider.getCurrentSecret();
allSecrets = secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret3, currentSecret);
Assert.assertEquals(2, allSecrets.length);
Assert.assertArrayEquals(secret3, allSecrets[0]);
Assert.assertArrayEquals(secret2, allSecrets[1]);
Thread.sleep((rolloverFrequency + 2000));
} finally {
secretProvider.destroy();
}
}
@Test
public void testMultipleInit() throws Exception {
long rolloverFrequency = 15 * 1000; // rollover every 15 sec
// use the same seed so we can predict the RNG
long seedA = System.currentTimeMillis();
Random rand = new Random(seedA);
byte[] secretA2 = Long.toString(rand.nextLong()).getBytes();
byte[] secretA1 = Long.toString(rand.nextLong()).getBytes();
// use the same seed so we can predict the RNG
long seedB = System.currentTimeMillis() + rand.nextLong();
rand = new Random(seedB);
byte[] secretB2 = Long.toString(rand.nextLong()).getBytes();
byte[] secretB1 = Long.toString(rand.nextLong()).getBytes();
// use the same seed so we can predict the RNG
long seedC = System.currentTimeMillis() + rand.nextLong();
rand = new Random(seedC);
byte[] secretC2 = Long.toString(rand.nextLong()).getBytes();
byte[] secretC1 = Long.toString(rand.nextLong()).getBytes();
ZKSignerSecretProvider secretProviderA = new ZKSignerSecretProvider(seedA);
ZKSignerSecretProvider secretProviderB = new ZKSignerSecretProvider(seedB);
ZKSignerSecretProvider secretProviderC = new ZKSignerSecretProvider(seedC);
Properties config = new Properties();
config.setProperty(
ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING,
zkServer.getConnectString());
config.setProperty(ZKSignerSecretProvider.ZOOKEEPER_PATH,
"/secret");
try {
secretProviderA.init(config, getDummyServletContext(), rolloverFrequency);
secretProviderB.init(config, getDummyServletContext(), rolloverFrequency);
secretProviderC.init(config, getDummyServletContext(), rolloverFrequency);
byte[] currentSecretA = secretProviderA.getCurrentSecret();
byte[][] allSecretsA = secretProviderA.getAllSecrets();
byte[] currentSecretB = secretProviderB.getCurrentSecret();
byte[][] allSecretsB = secretProviderB.getAllSecrets();
byte[] currentSecretC = secretProviderC.getCurrentSecret();
byte[][] allSecretsC = secretProviderC.getAllSecrets();
Assert.assertArrayEquals(currentSecretA, currentSecretB);
Assert.assertArrayEquals(currentSecretB, currentSecretC);
Assert.assertEquals(2, allSecretsA.length);
Assert.assertEquals(2, allSecretsB.length);
Assert.assertEquals(2, allSecretsC.length);
Assert.assertArrayEquals(allSecretsA[0], allSecretsB[0]);
Assert.assertArrayEquals(allSecretsB[0], allSecretsC[0]);
Assert.assertNull(allSecretsA[1]);
Assert.assertNull(allSecretsB[1]);
Assert.assertNull(allSecretsC[1]);
char secretChosen = 'z';
if (Arrays.equals(secretA1, currentSecretA)) {
Assert.assertArrayEquals(secretA1, allSecretsA[0]);
secretChosen = 'A';
} else if (Arrays.equals(secretB1, currentSecretB)) {
Assert.assertArrayEquals(secretB1, allSecretsA[0]);
secretChosen = 'B';
} else if (Arrays.equals(secretC1, currentSecretC)) {
Assert.assertArrayEquals(secretC1, allSecretsA[0]);
secretChosen = 'C';
} else {
Assert.fail("It appears that they all agreed on the same secret, but "
+ "not one of the secrets they were supposed to");
}
Thread.sleep((rolloverFrequency + 2000));
currentSecretA = secretProviderA.getCurrentSecret();
allSecretsA = secretProviderA.getAllSecrets();
currentSecretB = secretProviderB.getCurrentSecret();
allSecretsB = secretProviderB.getAllSecrets();
currentSecretC = secretProviderC.getCurrentSecret();
allSecretsC = secretProviderC.getAllSecrets();
Assert.assertArrayEquals(currentSecretA, currentSecretB);
Assert.assertArrayEquals(currentSecretB, currentSecretC);
Assert.assertEquals(2, allSecretsA.length);
Assert.assertEquals(2, allSecretsB.length);
Assert.assertEquals(2, allSecretsC.length);
Assert.assertArrayEquals(allSecretsA[0], allSecretsB[0]);
Assert.assertArrayEquals(allSecretsB[0], allSecretsC[0]);
Assert.assertArrayEquals(allSecretsA[1], allSecretsB[1]);
Assert.assertArrayEquals(allSecretsB[1], allSecretsC[1]);
// The second secret used is prechosen by whoever won the init; so it
// should match with whichever we saw before
if (secretChosen == 'A') {
Assert.assertArrayEquals(secretA2, currentSecretA);
} else if (secretChosen == 'B') {
Assert.assertArrayEquals(secretB2, currentSecretA);
} else if (secretChosen == 'C') {
Assert.assertArrayEquals(secretC2, currentSecretA);
}
} finally {
secretProviderC.destroy();
secretProviderB.destroy();
secretProviderA.destroy();
}
}
@Test
public void testMultipleUnsynchronized() throws Exception {
long rolloverFrequency = 15 * 1000; // rollover every 15 sec
// use the same seed so we can predict the RNG
long seedA = System.currentTimeMillis();
Random rand = new Random(seedA);
byte[] secretA2 = Long.toString(rand.nextLong()).getBytes();
byte[] secretA1 = Long.toString(rand.nextLong()).getBytes();
byte[] secretA3 = Long.toString(rand.nextLong()).getBytes();
// use the same seed so we can predict the RNG
long seedB = System.currentTimeMillis() + rand.nextLong();
rand = new Random(seedB);
byte[] secretB2 = Long.toString(rand.nextLong()).getBytes();
byte[] secretB1 = Long.toString(rand.nextLong()).getBytes();
byte[] secretB3 = Long.toString(rand.nextLong()).getBytes();
ZKSignerSecretProvider secretProviderA = new ZKSignerSecretProvider(seedA);
ZKSignerSecretProvider secretProviderB = new ZKSignerSecretProvider(seedB);
Properties config = new Properties();
config.setProperty(
ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING,
zkServer.getConnectString());
config.setProperty(ZKSignerSecretProvider.ZOOKEEPER_PATH,
"/secret");
try {
secretProviderA.init(config, getDummyServletContext(), rolloverFrequency);
byte[] currentSecretA = secretProviderA.getCurrentSecret();
byte[][] allSecretsA = secretProviderA.getAllSecrets();
Assert.assertArrayEquals(secretA1, currentSecretA);
Assert.assertEquals(2, allSecretsA.length);
Assert.assertArrayEquals(secretA1, allSecretsA[0]);
Assert.assertNull(allSecretsA[1]);
Thread.sleep((rolloverFrequency + 2000));
currentSecretA = secretProviderA.getCurrentSecret();
allSecretsA = secretProviderA.getAllSecrets();
Assert.assertArrayEquals(secretA2, currentSecretA);
Assert.assertEquals(2, allSecretsA.length);
Assert.assertArrayEquals(secretA2, allSecretsA[0]);
Assert.assertArrayEquals(secretA1, allSecretsA[1]);
Thread.sleep((rolloverFrequency / 5));
secretProviderB.init(config, getDummyServletContext(), rolloverFrequency);
byte[] currentSecretB = secretProviderB.getCurrentSecret();
byte[][] allSecretsB = secretProviderB.getAllSecrets();
Assert.assertArrayEquals(secretA2, currentSecretB);
Assert.assertEquals(2, allSecretsA.length);
Assert.assertArrayEquals(secretA2, allSecretsB[0]);
Assert.assertArrayEquals(secretA1, allSecretsB[1]);
Thread.sleep((rolloverFrequency));
currentSecretA = secretProviderA.getCurrentSecret();
allSecretsA = secretProviderA.getAllSecrets();
currentSecretB = secretProviderB.getCurrentSecret();
allSecretsB = secretProviderB.getAllSecrets();
Assert.assertArrayEquals(currentSecretA, currentSecretB);
Assert.assertEquals(2, allSecretsA.length);
Assert.assertEquals(2, allSecretsB.length);
Assert.assertArrayEquals(allSecretsA[0], allSecretsB[0]);
Assert.assertArrayEquals(allSecretsA[1], allSecretsB[1]);
if (Arrays.equals(secretA3, currentSecretA)) {
Assert.assertArrayEquals(secretA3, allSecretsA[0]);
} else if (Arrays.equals(secretB3, currentSecretB)) {
Assert.assertArrayEquals(secretB3, allSecretsA[0]);
} else {
Assert.fail("It appears that they all agreed on the same secret, but "
+ "not one of the secrets they were supposed to");
}
} finally {
secretProviderB.destroy();
secretProviderA.destroy();
}
}
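/**
 * Mocks a ServletContext that carries no shared Curator client, so each
 * provider opens its own ZooKeeper connection to the test server.
 */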
private ServletContext getDummyServletContext() {
ServletContext servletContext = Mockito.mock(ServletContext.class);
Mockito.when(servletContext.getAttribute(ZKSignerSecretProvider
.ZOOKEEPER_SIGNER_SECRET_PROVIDER_CURATOR_CLIENT_ATTRIBUTE))
.thenReturn(null);
return servletContext;
}
}
| 11,952 | 43.107011 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.net.HttpCookie;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Properties;
import java.util.Vector;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.util.Signer;
import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import static org.hamcrest.CoreMatchers.not;
import static org.junit.Assert.assertThat;
public class TestAuthenticationFilter {
private static final long TOKEN_VALIDITY_SEC = 1000;
@Test
public void testGetConfiguration() throws Exception {
AuthenticationFilter filter = new AuthenticationFilter();
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("");
Mockito.when(config.getInitParameter("a")).thenReturn("A");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>(Arrays.asList("a")).elements());
Properties props = filter.getConfiguration("", config);
Assert.assertEquals("A", props.getProperty("a"));
config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("foo");
Mockito.when(config.getInitParameter("foo.a")).thenReturn("A");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>(Arrays.asList("foo.a")).elements());
props = filter.getConfiguration("foo.", config);
Assert.assertEquals("A", props.getProperty("a"));
}
@Test
public void testInitEmpty() throws Exception {
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>().elements());
filter.init(config);
Assert.fail();
} catch (ServletException ex) {
// Expected
Assert.assertEquals("Authentication type must be specified: simple|kerberos|<class>",
ex.getMessage());
} catch (Exception ex) {
Assert.fail();
} finally {
filter.destroy();
}
}
public static class DummyAuthenticationHandler implements AuthenticationHandler {
public static boolean init;
public static boolean managementOperationReturn;
public static boolean destroy;
public static boolean expired;
public static final String TYPE = "dummy";
public static void reset() {
init = false;
destroy = false;
}
@Override
public void init(Properties config) throws ServletException {
init = true;
managementOperationReturn =
config.getProperty("management.operation.return", "true").equals("true");
expired = config.getProperty("expired.token", "false").equals("true");
}
@Override
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
if (!managementOperationReturn) {
response.setStatus(HttpServletResponse.SC_ACCEPTED);
}
return managementOperationReturn;
}
@Override
public void destroy() {
destroy = true;
}
@Override
public String getType() {
return TYPE;
}
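/**
 * Returns a token only when the request carries authenticated=true; otherwise
 * it sends a WWW-Authenticate challenge, and fails outright if the request
 * already carries that header.
 */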
@Override
public AuthenticationToken authenticate(HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token = null;
String param = request.getParameter("authenticated");
if (param != null && param.equals("true")) {
token = new AuthenticationToken("u", "p", "t");
token.setExpires((expired) ? 0 : System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
} else {
if (request.getHeader("WWW-Authenticate") == null) {
response.setHeader("WWW-Authenticate", "dummyauth");
} else {
throw new AuthenticationException("AUTH FAILED");
}
}
return token;
}
}
@Test
public void testFallbackToRandomSecretProvider() throws Exception {
// minimal configuration & simple auth handler (Pseudo)
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(
AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn(
(new Long(TOKEN_VALIDITY_SEC)).toString());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
ServletContext context = Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
.thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
Assert.assertTrue(filter.isRandomSecret());
Assert.assertFalse(filter.isCustomSignerSecretProvider());
Assert.assertNull(filter.getCookieDomain());
Assert.assertNull(filter.getCookiePath());
Assert.assertEquals(TOKEN_VALIDITY_SEC, filter.getValidity());
} finally {
filter.destroy();
}
}
@Test
public void testInit() throws Exception {
// custom secret as inline
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<>(Arrays.asList(AuthenticationFilter.AUTH_TYPE))
.elements());
ServletContext context = Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(
AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE)).thenReturn(
new SignerSecretProvider() {
@Override
public void init(Properties config, ServletContext servletContext,
long tokenValidity) {
}
@Override
public byte[] getCurrentSecret() {
return null;
}
@Override
public byte[][] getAllSecrets() {
return null;
}
});
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertFalse(filter.isRandomSecret());
Assert.assertTrue(filter.isCustomSignerSecretProvider());
} finally {
filter.destroy();
}
// custom secret by file
File testDir = new File(System.getProperty("test.build.data",
"target/test-dir"));
testDir.mkdirs();
String secretValue = "hadoop";
File secretFile = new File(testDir, "http-secret.txt");
Writer writer = new FileWriter(secretFile);
writer.write(secretValue);
writer.close();
filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(
AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(
AuthenticationFilter.SIGNATURE_SECRET_FILE))
.thenReturn(secretFile.getAbsolutePath());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.SIGNATURE_SECRET_FILE)).elements());
ServletContext context = Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(
AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
.thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertFalse(filter.isRandomSecret());
Assert.assertFalse(filter.isCustomSignerSecretProvider());
} finally {
filter.destroy();
}
// custom cookie domain and cookie path
filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com");
Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_PATH)).thenReturn("/bar");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.COOKIE_DOMAIN,
AuthenticationFilter.COOKIE_PATH)).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
Assert.assertEquals(".foo.com", filter.getCookieDomain());
Assert.assertEquals("/bar", filter.getCookiePath());
} finally {
filter.destroy();
}
// authentication handler lifecycle, and custom impl
DummyAuthenticationHandler.reset();
filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).
thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(
Arrays.asList(AuthenticationFilter.AUTH_TYPE,
"management.operation.return")).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
Assert.assertTrue(DummyAuthenticationHandler.init);
} finally {
filter.destroy();
Assert.assertTrue(DummyAuthenticationHandler.destroy);
}
// kerberos auth handler
filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
ServletContext sc = Mockito.mock(ServletContext.class);
Mockito.when(config.getServletContext()).thenReturn(sc);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("kerberos");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
filter.init(config);
} catch (ServletException ex) {
// Expected
} finally {
Assert.assertEquals(KerberosAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
filter.destroy();
}
}
@Test
public void testInitCaseSensitivity() throws Exception {
// minimal configuration & simple auth handler (Pseudo)
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("SimPle");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn(
(new Long(TOKEN_VALIDITY_SEC)).toString());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
Assert.assertEquals(PseudoAuthenticationHandler.class,
filter.getAuthenticationHandler().getClass());
} finally {
filter.destroy();
}
}
@Test
public void testGetRequestURL() throws Exception {
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).
thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(
Arrays.asList(AuthenticationFilter.AUTH_TYPE,
"management.operation.return")).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar"));
Mockito.when(request.getQueryString()).thenReturn("a=A&b=B");
Assert.assertEquals("http://foo:8080/bar?a=A&b=B", filter.getRequestURL(request));
} finally {
filter.destroy();
}
}
@Test
public void testGetToken() throws Exception {
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).
thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(
Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.SIGNATURE_SECRET,
"management.operation.return")).elements());
SignerSecretProvider secretProvider =
getMockedServletContextWithStringSigner(config);
filter.init(config);
AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
Signer signer = new Signer(secretProvider);
String tokenSigned = signer.sign(token.toString());
Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
AuthenticationToken newToken = filter.getToken(request);
Assert.assertEquals(token.toString(), newToken.toString());
} finally {
filter.destroy();
}
}
@Test
public void testGetTokenExpired() throws Exception {
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(
Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.SIGNATURE_SECRET,
"management.operation.return")).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
AuthenticationToken token =
new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
SignerSecretProvider secretProvider =
StringSignerSecretProviderCreator.newStringSignerSecretProvider();
Properties secretProviderProps = new Properties();
secretProviderProps.setProperty(
AuthenticationFilter.SIGNATURE_SECRET, "secret");
secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
Signer signer = new Signer(secretProvider);
String tokenSigned = signer.sign(token.toString());
Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
boolean failed = false;
try {
filter.getToken(request);
} catch (AuthenticationException ex) {
Assert.assertEquals("AuthenticationToken expired", ex.getMessage());
failed = true;
} finally {
Assert.assertTrue("token not expired", failed);
}
} finally {
filter.destroy();
}
}
@Test
public void testGetTokenInvalidType() throws Exception {
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).
thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(
Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.SIGNATURE_SECRET,
"management.operation.return")).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
SignerSecretProvider secretProvider =
StringSignerSecretProviderCreator.newStringSignerSecretProvider();
Properties secretProviderProps = new Properties();
secretProviderProps.setProperty(
AuthenticationFilter.SIGNATURE_SECRET, "secret");
secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
Signer signer = new Signer(secretProvider);
String tokenSigned = signer.sign(token.toString());
Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
boolean failed = false;
try {
filter.getToken(request);
} catch (AuthenticationException ex) {
Assert.assertEquals("Invalid AuthenticationToken type", ex.getMessage());
failed = true;
} finally {
Assert.assertTrue("token not invalid type", failed);
}
} finally {
filter.destroy();
}
}
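/**
 * Registers a StringSignerSecretProvider holding the secret "secret" on a
 * mocked ServletContext and wires that context into the given FilterConfig.
 */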
private static SignerSecretProvider getMockedServletContextWithStringSigner(
FilterConfig config) throws Exception {
Properties secretProviderProps = new Properties();
secretProviderProps.setProperty(AuthenticationFilter.SIGNATURE_SECRET,
"secret");
SignerSecretProvider secretProvider =
StringSignerSecretProviderCreator.newStringSignerSecretProvider();
secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
ServletContext context = Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(
AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
.thenReturn(secretProvider);
Mockito.when(config.getServletContext()).thenReturn(context);
return secretProvider;
}
@Test
public void testDoFilterNotAuthenticated() throws Exception {
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).
thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(
Arrays.asList(AuthenticationFilter.AUTH_TYPE,
"management.operation.return")).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar"));
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
FilterChain chain = Mockito.mock(FilterChain.class);
Mockito.doAnswer(
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Assert.fail();
return null;
}
}
).when(chain).doFilter(Mockito.<ServletRequest>anyObject(), Mockito.<ServletResponse>anyObject());
Mockito.when(response.containsHeader("WWW-Authenticate")).thenReturn(true);
filter.doFilter(request, response, chain);
Mockito.verify(response).sendError(
HttpServletResponse.SC_UNAUTHORIZED, "Authentication required");
} finally {
filter.destroy();
}
}
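/**
 * Drives doFilter() through a mocked request/response, optionally with a
 * custom cookie domain/path, an invalid auth cookie, or an immediately
 * expired token, and inspects the Set-Cookie header the filter writes.
 */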
private void _testDoFilterAuthentication(boolean withDomainPath,
boolean invalidToken,
boolean expired) throws Exception {
AuthenticationFilter filter = new AuthenticationFilter();
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).
thenReturn("true");
Mockito.when(config.getInitParameter("expired.token")).
thenReturn(Boolean.toString(expired));
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE))
.thenReturn(DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameter(AuthenticationFilter
.AUTH_TOKEN_VALIDITY)).thenReturn(new Long(TOKEN_VALIDITY_SEC).toString());
Mockito.when(config.getInitParameter(AuthenticationFilter
.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(new
Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.AUTH_TOKEN_VALIDITY,
AuthenticationFilter.SIGNATURE_SECRET, "management.operation" +
".return", "expired.token")).elements());
getMockedServletContextWithStringSigner(config);
if (withDomainPath) {
Mockito.when(config.getInitParameter(AuthenticationFilter
.COOKIE_DOMAIN)).thenReturn(".foo.com");
Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_PATH))
.thenReturn("/bar");
Mockito.when(config.getInitParameterNames()).thenReturn(new
Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.AUTH_TOKEN_VALIDITY,
AuthenticationFilter.SIGNATURE_SECRET,
AuthenticationFilter.COOKIE_DOMAIN, AuthenticationFilter
.COOKIE_PATH, "management.operation.return")).elements());
}
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getParameter("authenticated")).thenReturn("true");
Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer
("http://foo:8080/bar"));
Mockito.when(request.getQueryString()).thenReturn("authenticated=true");
if (invalidToken) {
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{new Cookie
(AuthenticatedURL.AUTH_COOKIE, "foo")});
}
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
FilterChain chain = Mockito.mock(FilterChain.class);
final HashMap<String, String> cookieMap = new HashMap<String, String>();
Mockito.doAnswer(new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
String cookieHeader = (String)invocation.getArguments()[1];
parseCookieMap(cookieHeader, cookieMap);
return null;
}
}).when(response).addHeader(Mockito.eq("Set-Cookie"), Mockito.anyString());
try {
filter.init(config);
filter.doFilter(request, response, chain);
if (expired) {
Mockito.verify(response, Mockito.never()).
addHeader(Mockito.eq("Set-Cookie"), Mockito.anyString());
} else {
String v = cookieMap.get(AuthenticatedURL.AUTH_COOKIE);
Assert.assertNotNull("cookie missing", v);
Assert.assertTrue(v.contains("u=") && v.contains("p=") && v.contains
("t=") && v.contains("e=") && v.contains("s="));
Mockito.verify(chain).doFilter(Mockito.any(ServletRequest.class),
Mockito.any(ServletResponse.class));
SignerSecretProvider secretProvider =
StringSignerSecretProviderCreator.newStringSignerSecretProvider();
Properties secretProviderProps = new Properties();
secretProviderProps.setProperty(
AuthenticationFilter.SIGNATURE_SECRET, "secret");
secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
Signer signer = new Signer(secretProvider);
String value = signer.verifyAndExtract(v);
AuthenticationToken token = AuthenticationToken.parse(value);
assertThat(token.getExpires(), not(0L));
if (withDomainPath) {
Assert.assertEquals(".foo.com", cookieMap.get("Domain"));
Assert.assertEquals("/bar", cookieMap.get("Path"));
} else {
Assert.assertFalse(cookieMap.containsKey("Domain"));
Assert.assertFalse(cookieMap.containsKey("Path"));
}
}
} finally {
filter.destroy();
}
}
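/**
 * Collects the auth cookie value, plus its Path and Domain attributes, from a
 * Set-Cookie header into the given map.
 */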
private static void parseCookieMap(String cookieHeader, HashMap<String,
String> cookieMap) {
List<HttpCookie> cookies = HttpCookie.parse(cookieHeader);
for (HttpCookie cookie : cookies) {
if (AuthenticatedURL.AUTH_COOKIE.equals(cookie.getName())) {
cookieMap.put(cookie.getName(), cookie.getValue());
if (cookie.getPath() != null) {
cookieMap.put("Path", cookie.getPath());
}
if (cookie.getDomain() != null) {
cookieMap.put("Domain", cookie.getDomain());
}
}
}
}
@Test
public void testDoFilterAuthentication() throws Exception {
_testDoFilterAuthentication(false, false, false);
}
@Test
public void testDoFilterAuthenticationImmediateExpiration() throws Exception {
_testDoFilterAuthentication(false, false, true);
}
@Test
public void testDoFilterAuthenticationWithInvalidToken() throws Exception {
_testDoFilterAuthentication(false, true, false);
}
@Test
public void testDoFilterAuthenticationWithDomainPath() throws Exception {
_testDoFilterAuthentication(true, false, false);
}
@Test
public void testDoFilterAuthenticated() throws Exception {
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).
thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(
Arrays.asList(AuthenticationFilter.AUTH_TYPE,
"management.operation.return")).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar"));
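      // build an unexpired token, sign it with the same "secret" the filter is
      // configured with, and present it through the hadoop.auth cookie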
AuthenticationToken token = new AuthenticationToken("u", "p", "t");
token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
SignerSecretProvider secretProvider =
StringSignerSecretProviderCreator.newStringSignerSecretProvider();
Properties secretProviderProps = new Properties();
secretProviderProps.setProperty(
AuthenticationFilter.SIGNATURE_SECRET, "secret");
secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
Signer signer = new Signer(secretProvider);
String tokenSigned = signer.sign(token.toString());
Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
FilterChain chain = Mockito.mock(FilterChain.class);
Mockito.doAnswer(
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
HttpServletRequest request = (HttpServletRequest) args[0];
Assert.assertEquals("u", request.getRemoteUser());
Assert.assertEquals("p", request.getUserPrincipal().getName());
return null;
}
}
).when(chain).doFilter(Mockito.<ServletRequest>anyObject(), Mockito.<ServletResponse>anyObject());
filter.doFilter(request, response, chain);
} finally {
filter.destroy();
}
}
@Test
public void testDoFilterAuthenticationFailure() throws Exception {
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).
thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(
Arrays.asList(AuthenticationFilter.AUTH_TYPE,
"management.operation.return")).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar"));
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{});
Mockito.when(request.getHeader("WWW-Authenticate")).thenReturn("dummyauth");
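      // with no cookie and a failing handler, the filter is expected to answer
      // 403 "AUTH FAILED" and to clear the hadoop.auth cookie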
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
FilterChain chain = Mockito.mock(FilterChain.class);
final HashMap<String, String> cookieMap = new HashMap<String, String>();
Mockito.doAnswer(
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
parseCookieMap((String) args[1], cookieMap);
return null;
}
}
).when(response).addHeader(Mockito.eq("Set-Cookie"), Mockito.anyString());
Mockito.doAnswer(
new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Assert.fail("shouldn't get here");
return null;
}
}
).when(chain).doFilter(Mockito.<ServletRequest>anyObject(), Mockito.<ServletResponse>anyObject());
filter.doFilter(request, response, chain);
Mockito.verify(response).sendError(
HttpServletResponse.SC_FORBIDDEN, "AUTH FAILED");
Mockito.verify(response, Mockito.never()).setHeader(Mockito.eq("WWW-Authenticate"), Mockito.anyString());
String value = cookieMap.get(AuthenticatedURL.AUTH_COOKIE);
Assert.assertNotNull("cookie missing", value);
Assert.assertEquals("", value);
} finally {
filter.destroy();
}
}
@Test
public void testDoFilterAuthenticatedExpired() throws Exception {
String secret = "secret";
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).
thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn(
secret);
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(
Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.SIGNATURE_SECRET,
"management.operation.return")).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar"));
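      // sign a token whose expiry is already in the past; the filter must treat
      // the request as unauthorized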
AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
SignerSecretProvider secretProvider =
StringSignerSecretProviderCreator.newStringSignerSecretProvider();
Properties secretProviderProps = new Properties();
secretProviderProps.setProperty(
AuthenticationFilter.SIGNATURE_SECRET, secret);
secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
Signer signer = new Signer(secretProvider);
String tokenSigned = signer.sign(token.toString());
Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.containsHeader("WWW-Authenticate")).thenReturn(true);
FilterChain chain = Mockito.mock(FilterChain.class);
verifyUnauthorized(filter, request, response, chain);
} finally {
filter.destroy();
}
}
private static void verifyUnauthorized(AuthenticationFilter filter,
HttpServletRequest request,
HttpServletResponse response,
FilterChain chain) throws
IOException,
ServletException {
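    // common assertions: a 401 response, the filter chain never invoked, and
    // the auth cookie reset to an empty value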
final HashMap<String, String> cookieMap = new HashMap<String, String>();
Mockito.doAnswer(new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
String cookieHeader = (String) invocation.getArguments()[1];
parseCookieMap(cookieHeader, cookieMap);
return null;
}
}).when(response).addHeader(Mockito.eq("Set-Cookie"), Mockito.anyString());
filter.doFilter(request, response, chain);
Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse
.SC_UNAUTHORIZED), Mockito.anyString());
Mockito.verify(chain, Mockito.never()).doFilter(Mockito.any
(ServletRequest.class), Mockito.any(ServletResponse.class));
Assert.assertTrue("cookie is missing",
cookieMap.containsKey(AuthenticatedURL.AUTH_COOKIE));
Assert.assertEquals("", cookieMap.get(AuthenticatedURL.AUTH_COOKIE));
}
@Test
public void testDoFilterAuthenticatedInvalidType() throws Exception {
String secret = "secret";
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).
thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn(
secret);
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(
Arrays.asList(AuthenticationFilter.AUTH_TYPE,
AuthenticationFilter.SIGNATURE_SECRET,
"management.operation.return")).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar"));
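      // a correctly signed, unexpired token whose type does not match the
      // configured handler must be rejected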
AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
SignerSecretProvider secretProvider =
StringSignerSecretProviderCreator.newStringSignerSecretProvider();
Properties secretProviderProps = new Properties();
secretProviderProps.setProperty(
AuthenticationFilter.SIGNATURE_SECRET, secret);
secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
Signer signer = new Signer(secretProvider);
String tokenSigned = signer.sign(token.toString());
Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.containsHeader("WWW-Authenticate")).thenReturn(true);
FilterChain chain = Mockito.mock(FilterChain.class);
verifyUnauthorized(filter, request, response, chain);
} finally {
filter.destroy();
}
}
@Test
public void testManagementOperation() throws Exception {
AuthenticationFilter filter = new AuthenticationFilter();
try {
FilterConfig config = Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).
thenReturn("false");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).
thenReturn(DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(
new Vector<String>(
Arrays.asList(AuthenticationFilter.AUTH_TYPE,
"management.operation.return")).elements());
getMockedServletContextWithStringSigner(config);
filter.init(config);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRequestURL()).
thenReturn(new StringBuffer("http://foo:8080/bar"));
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
FilterChain chain = Mockito.mock(FilterChain.class);
filter.doFilter(request, response, chain);
Mockito.verify(response).setStatus(HttpServletResponse.SC_ACCEPTED);
Mockito.verifyNoMoreInteractions(response);
Mockito.reset(request);
Mockito.reset(response);
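      // second pass: repeat the management request, this time presenting a
      // valid signed token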
AuthenticationToken token = new AuthenticationToken("u", "p", "t");
token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
SignerSecretProvider secretProvider =
StringSignerSecretProviderCreator.newStringSignerSecretProvider();
Properties secretProviderProps = new Properties();
secretProviderProps.setProperty(
AuthenticationFilter.SIGNATURE_SECRET, "secret");
secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
Signer signer = new Signer(secretProvider);
String tokenSigned = signer.sign(token.toString());
Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
filter.doFilter(request, response, chain);
Mockito.verify(response).setStatus(HttpServletResponse.SC_ACCEPTED);
Mockito.verifyNoMoreInteractions(response);
} finally {
filter.destroy();
}
}
}
| 41,987 | 41.976459 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.util.Properties;
public class TestPseudoAuthenticationHandler {
@Test
public void testInit() throws Exception {
PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler();
try {
Properties props = new Properties();
props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false");
handler.init(props);
Assert.assertEquals(false, handler.getAcceptAnonymous());
} finally {
handler.destroy();
}
}
@Test
public void testType() throws Exception {
PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler();
Assert.assertEquals(PseudoAuthenticationHandler.TYPE, handler.getType());
}
@Test
public void testAnonymousOn() throws Exception {
PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler();
try {
Properties props = new Properties();
props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
handler.init(props);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertEquals(AuthenticationToken.ANONYMOUS, token);
} finally {
handler.destroy();
}
}
@Test
public void testAnonymousOff() throws Exception {
PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler();
try {
Properties props = new Properties();
props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false");
handler.init(props);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertNull(token);
} finally {
handler.destroy();
}
}
private void _testUserName(boolean anonymous) throws Exception {
PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler();
try {
Properties props = new Properties();
props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, Boolean.toString(anonymous));
handler.init(props);
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(PseudoAuthenticator.USER_NAME + "=" + "user");
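      // with user.name supplied in the query string, a token for that user is
      // expected whether or not anonymous access is allowed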
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertNotNull(token);
Assert.assertEquals("user", token.getUserName());
Assert.assertEquals("user", token.getName());
Assert.assertEquals(PseudoAuthenticationHandler.TYPE, token.getType());
} finally {
handler.destroy();
}
}
@Test
public void testUserNameAnonymousOff() throws Exception {
_testUserName(false);
}
@Test
public void testUserNameAnonymousOn() throws Exception {
_testUserName(true);
}
}
| 4,062 | 33.432203 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import java.io.IOException;
import java.util.Properties;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
public class TestAltKerberosAuthenticationHandler
extends TestKerberosAuthenticationHandler {
@Override
protected KerberosAuthenticationHandler getNewAuthenticationHandler() {
// AltKerberosAuthenticationHandler is abstract; a subclass would normally
// perform some other authentication when alternateAuthenticate() is called.
// For the test, we'll just return an AuthenticationToken as the other
// authentication is left up to the developer of the subclass
return new AltKerberosAuthenticationHandler() {
@Override
public AuthenticationToken alternateAuthenticate(
HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
return new AuthenticationToken("A", "B", getType());
}
};
}
@Override
protected String getExpectedType() {
return AltKerberosAuthenticationHandler.TYPE;
}
@Test(timeout=60000)
public void testAlternateAuthenticationAsBrowser() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
// By default, a User-Agent without "java", "curl", "wget", or "perl" in it
// is considered a browser
Mockito.when(request.getHeader("User-Agent")).thenReturn("Some Browser");
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertEquals("A", token.getUserName());
Assert.assertEquals("B", token.getName());
Assert.assertEquals(getExpectedType(), token.getType());
}
@Test(timeout=60000)
public void testNonDefaultNonBrowserUserAgentAsBrowser() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
if (handler != null) {
handler.destroy();
handler = null;
}
handler = getNewAuthenticationHandler();
Properties props = getDefaultProperties();
props.setProperty("alt-kerberos.non-browser.user-agents", "foo, bar");
try {
handler.init(props);
} catch (Exception ex) {
handler = null;
throw ex;
}
// Pretend we're something that will not match with "foo" (or "bar")
Mockito.when(request.getHeader("User-Agent")).thenReturn("blah");
// Should use alt authentication
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertEquals("A", token.getUserName());
Assert.assertEquals("B", token.getName());
Assert.assertEquals(getExpectedType(), token.getType());
}
@Test(timeout=60000)
public void testNonDefaultNonBrowserUserAgentAsNonBrowser() throws Exception {
if (handler != null) {
handler.destroy();
handler = null;
}
handler = getNewAuthenticationHandler();
Properties props = getDefaultProperties();
props.setProperty("alt-kerberos.non-browser.user-agents", "foo, bar");
try {
handler.init(props);
} catch (Exception ex) {
handler = null;
throw ex;
}
// Run the kerberos tests again
testRequestWithoutAuthorization();
testRequestWithInvalidAuthorization();
testRequestWithAuthorization();
testRequestWithInvalidKerberosAuthorization();
}
}
| 4,273 | 35.844828 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import org.junit.Assert;
import org.junit.Test;
public class TestAuthenticationToken {
@Test
public void testAnonymous() {
Assert.assertNotNull(AuthenticationToken.ANONYMOUS);
Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getUserName());
Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getName());
Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getType());
Assert.assertEquals(-1, AuthenticationToken.ANONYMOUS.getExpires());
Assert.assertFalse(AuthenticationToken.ANONYMOUS.isExpired());
}
}
| 1,203 | 37.83871 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
import org.apache.hadoop.security.authentication.KerberosTestUtils;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.ietf.jgss.Oid;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.security.Principal;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.Callable;
public class TestKerberosAuthenticationHandler
extends KerberosSecurityTestcase {
protected KerberosAuthenticationHandler handler;
protected KerberosAuthenticationHandler getNewAuthenticationHandler() {
return new KerberosAuthenticationHandler();
}
protected String getExpectedType() {
return KerberosAuthenticationHandler.TYPE;
}
protected Properties getDefaultProperties() {
Properties props = new Properties();
props.setProperty(KerberosAuthenticationHandler.PRINCIPAL,
KerberosTestUtils.getServerPrincipal());
props.setProperty(KerberosAuthenticationHandler.KEYTAB,
KerberosTestUtils.getKeytabFile());
props.setProperty(KerberosAuthenticationHandler.NAME_RULES,
"RULE:[1:$1@$0](.*@" + KerberosTestUtils.getRealm()+")s/@.*//\n");
return props;
}
@Before
public void setup() throws Exception {
// create keytab
File keytabFile = new File(KerberosTestUtils.getKeytabFile());
String clientPrincipal = KerberosTestUtils.getClientPrincipal();
String serverPrincipal = KerberosTestUtils.getServerPrincipal();
clientPrincipal = clientPrincipal.substring(0, clientPrincipal.lastIndexOf("@"));
serverPrincipal = serverPrincipal.substring(0, serverPrincipal.lastIndexOf("@"));
getKdc().createPrincipal(keytabFile, clientPrincipal, serverPrincipal);
// handler
handler = getNewAuthenticationHandler();
Properties props = getDefaultProperties();
try {
handler.init(props);
} catch (Exception ex) {
handler = null;
throw ex;
}
}
@Test(timeout=60000)
public void testNameRules() throws Exception {
KerberosName kn = new KerberosName(KerberosTestUtils.getServerPrincipal());
Assert.assertEquals(KerberosTestUtils.getRealm(), kn.getRealm());
//destroy handler created in setUp()
handler.destroy();
KerberosName.setRules("RULE:[1:$1@$0](.*@FOO)s/@.*//\nDEFAULT");
handler = getNewAuthenticationHandler();
Properties props = getDefaultProperties();
props.setProperty(KerberosAuthenticationHandler.NAME_RULES, "RULE:[1:$1@$0](.*@BAR)s/@.*//\nDEFAULT");
try {
handler.init(props);
} catch (Exception ex) {
}
kn = new KerberosName("bar@BAR");
Assert.assertEquals("bar", kn.getShortName());
kn = new KerberosName("bar@FOO");
try {
kn.getShortName();
Assert.fail();
}
catch (Exception ex) {
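      // expected: once the handler installed the @BAR rules, the @FOO principal
      // no longer resolves to a short name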
}
}
@Test(timeout=60000)
public void testInit() throws Exception {
Assert.assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab());
Set<KerberosPrincipal> principals = handler.getPrincipals();
Principal expectedPrincipal =
new KerberosPrincipal(KerberosTestUtils.getServerPrincipal());
Assert.assertTrue(principals.contains(expectedPrincipal));
Assert.assertEquals(1, principals.size());
}
// dynamic configuration of HTTP principals
@Test(timeout=60000)
public void testDynamicPrincipalDiscovery() throws Exception {
String[] keytabUsers = new String[]{
"HTTP/host1", "HTTP/host2", "HTTP2/host1", "XHTTP/host"
};
String keytab = KerberosTestUtils.getKeytabFile();
getKdc().createPrincipal(new File(keytab), keytabUsers);
// destroy handler created in setUp()
handler.destroy();
Properties props = new Properties();
props.setProperty(KerberosAuthenticationHandler.KEYTAB, keytab);
props.setProperty(KerberosAuthenticationHandler.PRINCIPAL, "*");
handler = getNewAuthenticationHandler();
handler.init(props);
Assert.assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab());
Set<KerberosPrincipal> loginPrincipals = handler.getPrincipals();
for (String user : keytabUsers) {
Principal principal = new KerberosPrincipal(
user + "@" + KerberosTestUtils.getRealm());
boolean expected = user.startsWith("HTTP/");
Assert.assertEquals("checking for "+user, expected,
loginPrincipals.contains(principal));
}
}
// dynamic configuration of HTTP principals
@Test(timeout=60000)
public void testDynamicPrincipalDiscoveryMissingPrincipals() throws Exception {
String[] keytabUsers = new String[]{"hdfs/localhost"};
String keytab = KerberosTestUtils.getKeytabFile();
getKdc().createPrincipal(new File(keytab), keytabUsers);
// destroy handler created in setUp()
handler.destroy();
Properties props = new Properties();
props.setProperty(KerberosAuthenticationHandler.KEYTAB, keytab);
props.setProperty(KerberosAuthenticationHandler.PRINCIPAL, "*");
handler = getNewAuthenticationHandler();
try {
handler.init(props);
Assert.fail("init should have failed");
} catch (ServletException ex) {
Assert.assertEquals("Principals do not exist in the keytab",
ex.getCause().getMessage());
} catch (Throwable t) {
Assert.fail("wrong exception: "+t);
}
}
@Test(timeout=60000)
public void testType() throws Exception {
Assert.assertEquals(getExpectedType(), handler.getType());
}
public void testRequestWithoutAuthorization() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Assert.assertNull(handler.authenticate(request, response));
Mockito.verify(response).setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
Mockito.verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED);
}
public void testRequestWithInvalidAuthorization() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getHeader(KerberosAuthenticator.AUTHORIZATION)).thenReturn("invalid");
Assert.assertNull(handler.authenticate(request, response));
Mockito.verify(response).setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
Mockito.verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED);
}
@Test(timeout=60000)
public void testRequestWithIncompleteAuthorization() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getHeader(KerberosAuthenticator.AUTHORIZATION))
.thenReturn(KerberosAuthenticator.NEGOTIATE);
try {
handler.authenticate(request, response);
Assert.fail();
} catch (AuthenticationException ex) {
// Expected
} catch (Exception ex) {
Assert.fail();
}
}
public void testRequestWithAuthorization() throws Exception {
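    // obtain a real SPNEGO token by initiating a GSS-API security context as
    // the client principal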
String token = KerberosTestUtils.doAsClient(new Callable<String>() {
@Override
public String call() throws Exception {
GSSManager gssManager = GSSManager.getInstance();
GSSContext gssContext = null;
try {
String servicePrincipal = KerberosTestUtils.getServerPrincipal();
Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL");
GSSName serviceName = gssManager.createName(servicePrincipal,
oid);
oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
gssContext = gssManager.createContext(serviceName, oid, null,
GSSContext.DEFAULT_LIFETIME);
gssContext.requestCredDeleg(true);
gssContext.requestMutualAuth(true);
byte[] inToken = new byte[0];
byte[] outToken = gssContext.initSecContext(inToken, 0, inToken.length);
Base64 base64 = new Base64(0);
return base64.encodeToString(outToken);
} finally {
if (gssContext != null) {
gssContext.dispose();
}
}
}
});
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getHeader(KerberosAuthenticator.AUTHORIZATION))
.thenReturn(KerberosAuthenticator.NEGOTIATE + " " + token);
Mockito.when(request.getServerName()).thenReturn("localhost");
AuthenticationToken authToken = handler.authenticate(request, response);
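    // SPNEGO may complete in a single round trip (a token is returned) or
    // require another leg (401 plus a fresh Negotiate challenge)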
if (authToken != null) {
Mockito.verify(response).setHeader(Mockito.eq(KerberosAuthenticator.WWW_AUTHENTICATE),
Mockito.matches(KerberosAuthenticator.NEGOTIATE + " .*"));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
Assert.assertEquals(KerberosTestUtils.getClientPrincipal(), authToken.getName());
Assert.assertTrue(KerberosTestUtils.getClientPrincipal().startsWith(authToken.getUserName()));
Assert.assertEquals(getExpectedType(), authToken.getType());
} else {
Mockito.verify(response).setHeader(Mockito.eq(KerberosAuthenticator.WWW_AUTHENTICATE),
Mockito.matches(KerberosAuthenticator.NEGOTIATE + " .*"));
Mockito.verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED);
}
}
public void testRequestWithInvalidKerberosAuthorization() throws Exception {
String token = new Base64(0).encodeToString(new byte[]{0, 1, 2});
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getHeader(KerberosAuthenticator.AUTHORIZATION)).thenReturn(
KerberosAuthenticator.NEGOTIATE + token);
try {
handler.authenticate(request, response);
Assert.fail();
} catch (AuthenticationException ex) {
// Expected
} catch (Exception ex) {
Assert.fail();
}
}
@After
public void tearDown() throws Exception {
if (handler != null) {
handler.destroy();
handler = null;
}
}
}
| 11,803 | 37.575163 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestJWTRedirectAuthentictionHandler.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.NoSuchAlgorithmException;
import java.security.interfaces.RSAPrivateKey;
import java.security.interfaces.RSAPublicKey;
import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;
import java.util.Properties;
import java.util.Vector;
import java.util.Date;
import javax.servlet.ServletException;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
import org.apache.hadoop.security.authentication.KerberosTestUtils;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import com.nimbusds.jose.*;
import com.nimbusds.jwt.JWTClaimsSet;
import com.nimbusds.jwt.SignedJWT;
import com.nimbusds.jose.crypto.RSASSASigner;
import com.nimbusds.jose.crypto.RSASSAVerifier;
import com.nimbusds.jose.util.Base64URL;
public class TestJWTRedirectAuthentictionHandler extends
KerberosSecurityTestcase {
private static final String SERVICE_URL = "https://localhost:8888/resource";
private static final String REDIRECT_LOCATION =
"https://localhost:8443/authserver?originalUrl=" + SERVICE_URL;
RSAPublicKey publicKey = null;
RSAPrivateKey privateKey = null;
JWTRedirectAuthenticationHandler handler = null;
@Test
public void testNoPublicKeyJWT() throws Exception {
try {
Properties props = getProperties();
handler.init(props);
SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
privateKey);
Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
Mockito.when(request.getRequestURL()).thenReturn(
new StringBuffer(SERVICE_URL));
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
SERVICE_URL);
AuthenticationToken token = handler.alternateAuthenticate(request,
response);
fail("alternateAuthentication should have thrown a ServletException");
} catch (ServletException se) {
assertTrue(se.getMessage().contains(
"Public key for signature validation must be provisioned"));
} catch (AuthenticationException ae) {
fail("alternateAuthentication should NOT have thrown a AuthenticationException");
}
}
@Test
public void testCustomCookieNameJWT() throws Exception {
try {
handler.setPublicKey(publicKey);
Properties props = getProperties();
props.put(JWTRedirectAuthenticationHandler.JWT_COOKIE_NAME, "jowt");
handler.init(props);
SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
privateKey);
Cookie cookie = new Cookie("jowt", jwt.serialize());
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
Mockito.when(request.getRequestURL()).thenReturn(
new StringBuffer(SERVICE_URL));
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
SERVICE_URL);
AuthenticationToken token = handler.alternateAuthenticate(request,
response);
Assert.assertEquals("bob", token.getUserName());
} catch (ServletException se) {
fail("alternateAuthentication should NOT have thrown a ServletException: "
+ se.getMessage());
} catch (AuthenticationException ae) {
fail("alternateAuthentication should NOT have thrown a AuthenticationException");
}
}
@Test
public void testNoProviderURLJWT() throws Exception {
try {
handler.setPublicKey(publicKey);
Properties props = getProperties();
props
.remove(JWTRedirectAuthenticationHandler.AUTHENTICATION_PROVIDER_URL);
handler.init(props);
SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
privateKey);
Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
Mockito.when(request.getRequestURL()).thenReturn(
new StringBuffer(SERVICE_URL));
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
SERVICE_URL);
AuthenticationToken token = handler.alternateAuthenticate(request,
response);
fail("alternateAuthentication should have thrown an AuthenticationException");
} catch (ServletException se) {
assertTrue(se.getMessage().contains(
"Authentication provider URL must not be null"));
} catch (AuthenticationException ae) {
fail("alternateAuthentication should NOT have thrown a AuthenticationException");
}
}
@Test
public void testUnableToParseJWT() throws Exception {
try {
KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
kpg.initialize(2048);
KeyPair kp = kpg.genKeyPair();
RSAPublicKey publicKey = (RSAPublicKey) kp.getPublic();
handler.setPublicKey(publicKey);
Properties props = getProperties();
handler.init(props);
SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
privateKey);
Cookie cookie = new Cookie("hadoop-jwt", "ljm" + jwt.serialize());
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
Mockito.when(request.getRequestURL()).thenReturn(
new StringBuffer(SERVICE_URL));
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
SERVICE_URL);
AuthenticationToken token = handler.alternateAuthenticate(request,
response);
Mockito.verify(response).sendRedirect(REDIRECT_LOCATION);
} catch (ServletException se) {
fail("alternateAuthentication should NOT have thrown a ServletException");
} catch (AuthenticationException ae) {
fail("alternateAuthentication should NOT have thrown a AuthenticationException");
}
}
@Test
public void testFailedSignatureValidationJWT() throws Exception {
try {
// Create a public key that doesn't match the one needed to
// verify the signature - in order to make it fail verification...
KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
kpg.initialize(2048);
KeyPair kp = kpg.genKeyPair();
RSAPublicKey publicKey = (RSAPublicKey) kp.getPublic();
handler.setPublicKey(publicKey);
Properties props = getProperties();
handler.init(props);
SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
privateKey);
Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
Mockito.when(request.getRequestURL()).thenReturn(
new StringBuffer(SERVICE_URL));
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
SERVICE_URL);
AuthenticationToken token = handler.alternateAuthenticate(request,
response);
Mockito.verify(response).sendRedirect(REDIRECT_LOCATION);
} catch (ServletException se) {
fail("alternateAuthentication should NOT have thrown a ServletException");
} catch (AuthenticationException ae) {
fail("alternateAuthentication should NOT have thrown a AuthenticationException");
}
}
@Test
public void testExpiredJWT() throws Exception {
try {
handler.setPublicKey(publicKey);
Properties props = getProperties();
handler.init(props);
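      // a token that expired one second ago must not authenticate; a redirect
      // to the authentication provider is expected instead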
SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() - 1000),
privateKey);
Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
Mockito.when(request.getRequestURL()).thenReturn(
new StringBuffer(SERVICE_URL));
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
SERVICE_URL);
AuthenticationToken token = handler.alternateAuthenticate(request,
response);
Mockito.verify(response).sendRedirect(REDIRECT_LOCATION);
} catch (ServletException se) {
fail("alternateAuthentication should NOT have thrown a ServletException");
} catch (AuthenticationException ae) {
fail("alternateAuthentication should NOT have thrown a AuthenticationException");
}
}
@Test
public void testInvalidAudienceJWT() throws Exception {
try {
handler.setPublicKey(publicKey);
Properties props = getProperties();
props
.put(JWTRedirectAuthenticationHandler.EXPECTED_JWT_AUDIENCES, "foo");
handler.init(props);
SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
privateKey);
Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
Mockito.when(request.getRequestURL()).thenReturn(
new StringBuffer(SERVICE_URL));
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
SERVICE_URL);
AuthenticationToken token = handler.alternateAuthenticate(request,
response);
Mockito.verify(response).sendRedirect(REDIRECT_LOCATION);
} catch (ServletException se) {
fail("alternateAuthentication should NOT have thrown a ServletException");
} catch (AuthenticationException ae) {
fail("alternateAuthentication should NOT have thrown a AuthenticationException");
}
}
@Test
public void testValidAudienceJWT() throws Exception {
try {
handler.setPublicKey(publicKey);
Properties props = getProperties();
props
.put(JWTRedirectAuthenticationHandler.EXPECTED_JWT_AUDIENCES, "bar");
handler.init(props);
SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
privateKey);
Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
Mockito.when(request.getRequestURL()).thenReturn(
new StringBuffer(SERVICE_URL));
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
SERVICE_URL);
AuthenticationToken token = handler.alternateAuthenticate(request,
response);
Assert.assertEquals("bob", token.getUserName());
} catch (ServletException se) {
fail("alternateAuthentication should NOT have thrown a ServletException");
} catch (AuthenticationException ae) {
fail("alternateAuthentication should NOT have thrown an AuthenticationException");
}
}
@Test
public void testValidJWT() throws Exception {
try {
handler.setPublicKey(publicKey);
Properties props = getProperties();
handler.init(props);
SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 5000),
privateKey);
Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
Mockito.when(request.getRequestURL()).thenReturn(
new StringBuffer(SERVICE_URL));
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
SERVICE_URL);
AuthenticationToken token = handler.alternateAuthenticate(request,
response);
Assert.assertNotNull("Token should not be null.", token);
Assert.assertEquals("alice", token.getUserName());
} catch (ServletException se) {
fail("alternateAuthentication should NOT have thrown a ServletException.");
} catch (AuthenticationException ae) {
fail("alternateAuthentication should NOT have thrown an AuthenticationException");
}
}
@Before
public void setup() throws Exception, NoSuchAlgorithmException {
setupKerberosRequirements();
KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
kpg.initialize(2048);
KeyPair kp = kpg.genKeyPair();
publicKey = (RSAPublicKey) kp.getPublic();
privateKey = (RSAPrivateKey) kp.getPrivate();
handler = new JWTRedirectAuthenticationHandler();
}
protected void setupKerberosRequirements() throws Exception {
String[] keytabUsers = new String[] { "HTTP/host1", "HTTP/host2",
"HTTP2/host1", "XHTTP/host" };
String keytab = KerberosTestUtils.getKeytabFile();
getKdc().createPrincipal(new File(keytab), keytabUsers);
}
@After
public void teardown() throws Exception {
handler.destroy();
}
protected Properties getProperties() {
Properties props = new Properties();
props.setProperty(
JWTRedirectAuthenticationHandler.AUTHENTICATION_PROVIDER_URL,
"https://localhost:8443/authserver");
props.setProperty("kerberos.principal",
KerberosTestUtils.getServerPrincipal());
props.setProperty("kerberos.keytab", KerberosTestUtils.getKeytabFile());
return props;
}
protected SignedJWT getJWT(String sub, Date expires, RSAPrivateKey privateKey)
throws Exception {
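    // build an RS256-signed JWT for the given subject and expiry, with issuer
    // "https://c2id.com" and audience "bar"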
JWTClaimsSet claimsSet = new JWTClaimsSet();
claimsSet.setSubject(sub);
claimsSet.setIssueTime(new Date(new Date().getTime()));
claimsSet.setIssuer("https://c2id.com");
claimsSet.setCustomClaim("scope", "openid");
claimsSet.setExpirationTime(expires);
List<String> aud = new ArrayList<String>();
aud.add("bar");
claimsSet.setAudience("bar");
JWSHeader header = new JWSHeader.Builder(JWSAlgorithm.RS256).build();
SignedJWT signedJWT = new SignedJWT(header, claimsSet);
Base64URL sigInput = Base64URL.encode(signedJWT.getSigningInput());
JWSSigner signer = new RSASSASigner(privateKey);
signedJWT.sign(signer);
return signedJWT;
}
}
| 16,078 | 37.374702 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.client;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.List;
import java.util.Map;
/**
* The {@link AuthenticatedURL} class enables the use of the JDK {@link URL} class
* against HTTP endpoints protected with the {@link AuthenticationFilter}.
* <p>
* The authentication mechanisms supported by default are Hadoop Simple authentication
* (also known as pseudo authentication) and Kerberos SPNEGO authentication.
* <p>
* Additional authentication mechanisms can be supported via {@link Authenticator} implementations.
* <p>
* The default {@link Authenticator} is the {@link KerberosAuthenticator} class which supports
* automatic fallback from Kerberos SPNEGO to Hadoop Simple authentication.
* <p>
* <code>AuthenticatedURL</code> instances are not thread-safe.
* <p>
* The usage pattern of the {@link AuthenticatedURL} is:
* <pre>
*
* // establishing an initial connection
*
* URL url = new URL("http://foo:8080/bar");
* AuthenticatedURL.Token token = new AuthenticatedURL.Token();
* AuthenticatedURL aUrl = new AuthenticatedURL();
* HttpURLConnection conn = new AuthenticatedURL(url, token).openConnection();
* ....
* // use the 'conn' instance
* ....
*
* // establishing a follow up connection using a token from the previous connection
*
* HttpURLConnection conn = new AuthenticatedURL(url, token).openConnection();
* ....
* // use the 'conn' instance
* ....
*
* </pre>
*/
public class AuthenticatedURL {
/**
* Name of the HTTP cookie used for the authentication token between the client and the server.
*/
public static final String AUTH_COOKIE = "hadoop.auth";
private static final String AUTH_COOKIE_EQ = AUTH_COOKIE + "=";
/**
* Client side authentication token.
*/
public static class Token {
private String token;
/**
* Creates a token.
*/
public Token() {
}
/**
* Creates a token using an existing string representation of the token.
*
     * @param tokenStr string representation of the token.
*/
public Token(String tokenStr) {
if (tokenStr == null) {
throw new IllegalArgumentException("tokenStr cannot be null");
}
set(tokenStr);
}
/**
* Returns if a token from the server has been set.
*
* @return if a token from the server has been set.
*/
public boolean isSet() {
return token != null;
}
/**
* Sets a token.
*
     * @param tokenStr string representation of the token.
*/
void set(String tokenStr) {
token = tokenStr;
}
/**
* Returns the string representation of the token.
*
* @return the string representation of the token.
*/
@Override
public String toString() {
return token;
}
}
private static Class<? extends Authenticator> DEFAULT_AUTHENTICATOR = KerberosAuthenticator.class;
/**
* Sets the default {@link Authenticator} class to use when an {@link AuthenticatedURL} instance
* is created without specifying an authenticator.
*
* @param authenticator the authenticator class to use as default.
*/
public static void setDefaultAuthenticator(Class<? extends Authenticator> authenticator) {
DEFAULT_AUTHENTICATOR = authenticator;
}
/**
* Returns the default {@link Authenticator} class to use when an {@link AuthenticatedURL} instance
* is created without specifying an authenticator.
*
* @return the authenticator class to use as default.
*/
public static Class<? extends Authenticator> getDefaultAuthenticator() {
return DEFAULT_AUTHENTICATOR;
}
private Authenticator authenticator;
private ConnectionConfigurator connConfigurator;
/**
* Creates an {@link AuthenticatedURL}.
*/
public AuthenticatedURL() {
this(null);
}
/**
* Creates an <code>AuthenticatedURL</code>.
*
* @param authenticator the {@link Authenticator} instance to use, if <code>null</code> a {@link
* KerberosAuthenticator} is used.
*/
public AuthenticatedURL(Authenticator authenticator) {
this(authenticator, null);
}
/**
* Creates an <code>AuthenticatedURL</code>.
*
* @param authenticator the {@link Authenticator} instance to use, if <code>null</code> a {@link
* KerberosAuthenticator} is used.
* @param connConfigurator a connection configurator.
*/
public AuthenticatedURL(Authenticator authenticator,
ConnectionConfigurator connConfigurator) {
try {
this.authenticator = (authenticator != null) ? authenticator : DEFAULT_AUTHENTICATOR.newInstance();
} catch (Exception ex) {
throw new RuntimeException(ex);
}
this.connConfigurator = connConfigurator;
this.authenticator.setConnectionConfigurator(connConfigurator);
}
/**
* Returns the {@link Authenticator} instance used by the
* <code>AuthenticatedURL</code>.
*
* @return the {@link Authenticator} instance
*/
protected Authenticator getAuthenticator() {
return authenticator;
}
/**
* Returns an authenticated {@link HttpURLConnection}.
*
* @param url the URL to connect to. Only HTTP/S URLs are supported.
* @param token the authentication token being used for the user.
*
* @return an authenticated {@link HttpURLConnection}.
*
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public HttpURLConnection openConnection(URL url, Token token) throws IOException, AuthenticationException {
if (url == null) {
throw new IllegalArgumentException("url cannot be NULL");
}
if (!url.getProtocol().equalsIgnoreCase("http") && !url.getProtocol().equalsIgnoreCase("https")) {
throw new IllegalArgumentException("url must be for a HTTP or HTTPS resource");
}
if (token == null) {
throw new IllegalArgumentException("token cannot be NULL");
}
authenticator.authenticate(url, token);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
if (connConfigurator != null) {
conn = connConfigurator.configure(conn);
}
injectToken(conn, token);
return conn;
}
/**
* Helper method that injects an authentication token to send with a connection.
*
* @param conn connection to inject the authentication token into.
* @param token authentication token to inject.
*/
public static void injectToken(HttpURLConnection conn, Token token) {
String t = token.token;
if (t != null) {
if (!t.startsWith("\"")) {
t = "\"" + t + "\"";
}
conn.addRequestProperty("Cookie", AUTH_COOKIE_EQ + t);
}
}
/**
* Helper method that extracts an authentication token received from a connection.
* <p>
* This method is used by {@link Authenticator} implementations.
*
* @param conn connection to extract the authentication token from.
* @param token the authentication token.
*
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public static void extractToken(HttpURLConnection conn, Token token) throws IOException, AuthenticationException {
int respCode = conn.getResponseCode();
if (respCode == HttpURLConnection.HTTP_OK
|| respCode == HttpURLConnection.HTTP_CREATED
|| respCode == HttpURLConnection.HTTP_ACCEPTED) {
Map<String, List<String>> headers = conn.getHeaderFields();
List<String> cookies = headers.get("Set-Cookie");
if (cookies != null) {
for (String cookie : cookies) {
if (cookie.startsWith(AUTH_COOKIE_EQ)) {
String value = cookie.substring(AUTH_COOKIE_EQ.length());
int separator = value.indexOf(";");
if (separator > -1) {
value = value.substring(0, separator);
}
if (value.length() > 0) {
token.set(value);
}
}
}
}
} else {
token.set(null);
throw new AuthenticationException("Authentication failed, status: " + conn.getResponseCode() +
", message: " + conn.getResponseMessage());
}
}
}
| 8,979 | 31.071429 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.client;
import java.io.IOException;
import java.net.URL;
/**
* Interface for client authentication mechanisms.
* <p>
 * Implementations are use-once instances; they do not need to be thread-safe.
*/
public interface Authenticator {
/**
* Sets a {@link ConnectionConfigurator} instance to use for
* configuring connections.
*
* @param configurator the {@link ConnectionConfigurator} instance.
*/
public void setConnectionConfigurator(ConnectionConfigurator configurator);
/**
* Authenticates against a URL and returns a {@link AuthenticatedURL.Token} to be
* used by subsequent requests.
*
   * @param url the URL to authenticate against.
* @param token the authentication token being used for the user.
*
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication error occurred.
*/
public void authenticate(URL url, AuthenticatedURL.Token token) throws IOException, AuthenticationException;
}
| 1,634 | 33.0625 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticationException.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.client;
/**
 * Exception thrown when an authentication error occurs.
*/
public class AuthenticationException extends Exception {
static final long serialVersionUID = 0;
/**
* Creates an {@link AuthenticationException}.
*
* @param cause original exception.
*/
public AuthenticationException(Throwable cause) {
super(cause);
}
/**
* Creates an {@link AuthenticationException}.
*
* @param msg exception message.
*/
public AuthenticationException(String msg) {
super(msg);
}
/**
* Creates an {@link AuthenticationException}.
*
* @param msg exception message.
* @param cause original exception.
*/
public AuthenticationException(String msg, Throwable cause) {
super(msg, cause);
}
}
| 1,400 | 26.470588 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.client;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.security.authentication.util.AuthToken;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import org.ietf.jgss.Oid;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosKey;
import javax.security.auth.kerberos.KerberosTicket;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.security.AccessControlContext;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.Map;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
/**
* The {@link KerberosAuthenticator} implements the Kerberos SPNEGO authentication sequence.
* <p>
* It uses the default principal for the Kerberos cache (normally set via kinit).
* <p>
* It falls back to the {@link PseudoAuthenticator} if the HTTP endpoint does not trigger an SPNEGO authentication
* sequence.
*/
public class KerberosAuthenticator implements Authenticator {
private static Logger LOG = LoggerFactory.getLogger(
KerberosAuthenticator.class);
/**
* HTTP header used by the SPNEGO server endpoint during an authentication sequence.
*/
public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
/**
* HTTP header used by the SPNEGO client endpoint during an authentication sequence.
*/
public static final String AUTHORIZATION = "Authorization";
/**
* HTTP header prefix used by the SPNEGO client/server endpoints during an authentication sequence.
*/
public static final String NEGOTIATE = "Negotiate";
private static final String AUTH_HTTP_METHOD = "OPTIONS";
/*
* Defines the Kerberos configuration that will be used to obtain the Kerberos principal from the
* Kerberos cache.
*/
private static class KerberosConfiguration extends Configuration {
private static final String OS_LOGIN_MODULE_NAME;
private static final boolean windows = System.getProperty("os.name").startsWith("Windows");
private static final boolean is64Bit = System.getProperty("os.arch").contains("64");
private static final boolean aix = System.getProperty("os.name").equals("AIX");
/* Return the OS login module class name */
private static String getOSLoginModuleName() {
if (IBM_JAVA) {
if (windows) {
return is64Bit ? "com.ibm.security.auth.module.Win64LoginModule"
: "com.ibm.security.auth.module.NTLoginModule";
} else if (aix) {
return is64Bit ? "com.ibm.security.auth.module.AIX64LoginModule"
: "com.ibm.security.auth.module.AIXLoginModule";
} else {
return "com.ibm.security.auth.module.LinuxLoginModule";
}
} else {
return windows ? "com.sun.security.auth.module.NTLoginModule"
: "com.sun.security.auth.module.UnixLoginModule";
}
}
static {
OS_LOGIN_MODULE_NAME = getOSLoginModuleName();
}
private static final AppConfigurationEntry OS_SPECIFIC_LOGIN =
new AppConfigurationEntry(OS_LOGIN_MODULE_NAME,
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
new HashMap<String, String>());
private static final Map<String, String> USER_KERBEROS_OPTIONS = new HashMap<String, String>();
static {
String ticketCache = System.getenv("KRB5CCNAME");
if (IBM_JAVA) {
USER_KERBEROS_OPTIONS.put("useDefaultCcache", "true");
} else {
USER_KERBEROS_OPTIONS.put("doNotPrompt", "true");
USER_KERBEROS_OPTIONS.put("useTicketCache", "true");
}
if (ticketCache != null) {
if (IBM_JAVA) {
// The first value searched when "useDefaultCcache" is used.
System.setProperty("KRB5CCNAME", ticketCache);
} else {
USER_KERBEROS_OPTIONS.put("ticketCache", ticketCache);
}
}
USER_KERBEROS_OPTIONS.put("renewTGT", "true");
}
private static final AppConfigurationEntry USER_KERBEROS_LOGIN =
new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL,
USER_KERBEROS_OPTIONS);
private static final AppConfigurationEntry[] USER_KERBEROS_CONF =
new AppConfigurationEntry[]{OS_SPECIFIC_LOGIN, USER_KERBEROS_LOGIN};
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String appName) {
return USER_KERBEROS_CONF;
}
}
private URL url;
private HttpURLConnection conn;
private Base64 base64;
private ConnectionConfigurator connConfigurator;
/**
* Sets a {@link ConnectionConfigurator} instance to use for
* configuring connections.
*
* @param configurator the {@link ConnectionConfigurator} instance.
*/
@Override
public void setConnectionConfigurator(ConnectionConfigurator configurator) {
connConfigurator = configurator;
}
/**
* Performs SPNEGO authentication against the specified URL.
* <p>
   * If the token is already set, this is a NOP and the given token is left unchanged.
* <p>
* If no token is given, it will perform the SPNEGO authentication sequence using an
* HTTP <code>OPTIONS</code> request.
*
   * @param url the URL to authenticate against.
* @param token the authentication token being used for the user.
*
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication error occurred.
*/
@Override
public void authenticate(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
if (!token.isSet()) {
this.url = url;
base64 = new Base64(0);
conn = (HttpURLConnection) url.openConnection();
if (connConfigurator != null) {
conn = connConfigurator.configure(conn);
}
conn.setRequestMethod(AUTH_HTTP_METHOD);
conn.connect();
boolean needFallback = false;
if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
LOG.debug("JDK performed authentication on our behalf.");
// If the JDK already did the SPNEGO back-and-forth for
// us, just pull out the token.
AuthenticatedURL.extractToken(conn, token);
if (isTokenKerberos(token)) {
return;
}
needFallback = true;
}
if (!needFallback && isNegotiate()) {
LOG.debug("Performing our own SPNEGO sequence.");
doSpnegoSequence(token);
} else {
LOG.debug("Using fallback authenticator sequence.");
Authenticator auth = getFallBackAuthenticator();
        // Make sure that the fallback authenticator has the same
        // ConnectionConfigurator, since getFallBackAuthenticator() might be
        // overridden. Otherwise the fallback authenticator might not have the
        // information needed to make the connection (e.g., SSL certificates).
auth.setConnectionConfigurator(connConfigurator);
auth.authenticate(url, token);
}
}
}
/**
* If the specified URL does not support SPNEGO authentication, a fallback {@link Authenticator} will be used.
* <p>
* This implementation returns a {@link PseudoAuthenticator}.
*
* @return the fallback {@link Authenticator}.
*/
protected Authenticator getFallBackAuthenticator() {
Authenticator auth = new PseudoAuthenticator();
if (connConfigurator != null) {
auth.setConnectionConfigurator(connConfigurator);
}
return auth;
}
/*
* Check if the passed token is of type "kerberos" or "kerberos-dt"
*/
private boolean isTokenKerberos(AuthenticatedURL.Token token)
throws AuthenticationException {
if (token.isSet()) {
AuthToken aToken = AuthToken.parse(token.toString());
if (aToken.getType().equals("kerberos") ||
aToken.getType().equals("kerberos-dt")) {
return true;
}
}
return false;
}
/*
* Indicates if the response is starting a SPNEGO negotiation.
*/
private boolean isNegotiate() throws IOException {
boolean negotiate = false;
if (conn.getResponseCode() == HttpURLConnection.HTTP_UNAUTHORIZED) {
String authHeader = conn.getHeaderField(WWW_AUTHENTICATE);
negotiate = authHeader != null && authHeader.trim().startsWith(NEGOTIATE);
}
return negotiate;
}
/**
* Implements the SPNEGO authentication sequence interaction using the current default principal
* in the Kerberos cache (normally set via kinit).
*
* @param token the authentication token being used for the user.
*
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication error occurred.
*/
private void doSpnegoSequence(AuthenticatedURL.Token token) throws IOException, AuthenticationException {
try {
AccessControlContext context = AccessController.getContext();
Subject subject = Subject.getSubject(context);
if (subject == null
|| (subject.getPrivateCredentials(KerberosKey.class).isEmpty()
&& subject.getPrivateCredentials(KerberosTicket.class).isEmpty())) {
LOG.debug("No subject in context, logging in");
subject = new Subject();
LoginContext login = new LoginContext("", subject,
null, new KerberosConfiguration());
login.login();
}
if (LOG.isDebugEnabled()) {
LOG.debug("Using subject: " + subject);
}
Subject.doAs(subject, new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
GSSContext gssContext = null;
try {
GSSManager gssManager = GSSManager.getInstance();
String servicePrincipal = KerberosUtil.getServicePrincipal("HTTP",
KerberosAuthenticator.this.url.getHost());
Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL");
GSSName serviceName = gssManager.createName(servicePrincipal,
oid);
oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
gssContext = gssManager.createContext(serviceName, oid, null,
GSSContext.DEFAULT_LIFETIME);
gssContext.requestCredDeleg(true);
gssContext.requestMutualAuth(true);
byte[] inToken = new byte[0];
byte[] outToken;
boolean established = false;
// Loop while the context is still not established
while (!established) {
outToken = gssContext.initSecContext(inToken, 0, inToken.length);
if (outToken != null) {
sendToken(outToken);
}
if (!gssContext.isEstablished()) {
inToken = readToken();
} else {
established = true;
}
}
} finally {
if (gssContext != null) {
gssContext.dispose();
gssContext = null;
}
}
return null;
}
});
} catch (PrivilegedActionException ex) {
throw new AuthenticationException(ex.getException());
} catch (LoginException ex) {
throw new AuthenticationException(ex);
}
AuthenticatedURL.extractToken(conn, token);
}
/*
* Sends the Kerberos token to the server.
*/
private void sendToken(byte[] outToken) throws IOException {
String token = base64.encodeToString(outToken);
conn = (HttpURLConnection) url.openConnection();
if (connConfigurator != null) {
conn = connConfigurator.configure(conn);
}
conn.setRequestMethod(AUTH_HTTP_METHOD);
conn.setRequestProperty(AUTHORIZATION, NEGOTIATE + " " + token);
conn.connect();
}
/*
* Retrieves the Kerberos token returned by the server.
*/
private byte[] readToken() throws IOException, AuthenticationException {
int status = conn.getResponseCode();
if (status == HttpURLConnection.HTTP_OK || status == HttpURLConnection.HTTP_UNAUTHORIZED) {
String authHeader = conn.getHeaderField(WWW_AUTHENTICATE);
if (authHeader == null || !authHeader.trim().startsWith(NEGOTIATE)) {
throw new AuthenticationException("Invalid SPNEGO sequence, '" + WWW_AUTHENTICATE +
"' header incorrect: " + authHeader);
}
String negotiation = authHeader.trim().substring((NEGOTIATE + " ").length()).trim();
return base64.decode(negotiation);
}
throw new AuthenticationException("Invalid SPNEGO sequence, status code: " + status);
}
}
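// Hedged usage sketch (illustrative only): driving the SPNEGO sequence directly.
// Assumes a TGT in the Kerberos ticket cache (kinit) and a hypothetical SPNEGO-protected
// endpoint; if the server never answers with "WWW-Authenticate: Negotiate", the
// PseudoAuthenticator fallback is used instead.
class KerberosAuthenticatorUsageSketch {
  static AuthenticatedURL.Token login() throws IOException, AuthenticationException {
    URL url = new URL("http://namenode.example.com:50070/webhdfs/v1/"); // hypothetical endpoint
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    new KerberosAuthenticator().authenticate(url, token);
    return token; // carries the auth cookie value extracted from the server response
  }
}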
| 13,796 | 36.188679 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.client;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
/**
* The {@link PseudoAuthenticator} implementation provides an authentication equivalent to Hadoop's
* Simple authentication, it trusts the value of the 'user.name' Java System property.
* <p>
* The 'user.name' value is propagated using an additional query string parameter {@link #USER_NAME} ('user.name').
*/
public class PseudoAuthenticator implements Authenticator {
/**
* Name of the additional parameter that carries the 'user.name' value.
*/
public static final String USER_NAME = "user.name";
private static final String USER_NAME_EQ = USER_NAME + "=";
private ConnectionConfigurator connConfigurator;
/**
* Sets a {@link ConnectionConfigurator} instance to use for
* configuring connections.
*
* @param configurator the {@link ConnectionConfigurator} instance.
*/
@Override
public void setConnectionConfigurator(ConnectionConfigurator configurator) {
connConfigurator = configurator;
}
/**
* Performs simple authentication against the specified URL.
* <p>
* If a token is given it does a NOP and returns the given token.
* <p>
* If no token is given, it will perform an HTTP <code>OPTIONS</code> request injecting an additional
* parameter {@link #USER_NAME} in the query string with the value returned by the {@link #getUserName()}
* method.
* <p>
* If the response is successful it will update the authentication token.
*
   * @param url the URL to authenticate against.
   * @param token the authentication token being used for the user.
*
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication error occurred.
*/
@Override
public void authenticate(URL url, AuthenticatedURL.Token token) throws IOException, AuthenticationException {
String strUrl = url.toString();
String paramSeparator = (strUrl.contains("?")) ? "&" : "?";
strUrl += paramSeparator + USER_NAME_EQ + getUserName();
url = new URL(strUrl);
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
if (connConfigurator != null) {
conn = connConfigurator.configure(conn);
}
conn.setRequestMethod("OPTIONS");
conn.connect();
AuthenticatedURL.extractToken(conn, token);
}
/**
* Returns the current user name.
* <p>
* This implementation returns the value of the Java system property 'user.name'
*
* @return the current user name.
*/
protected String getUserName() {
return System.getProperty("user.name");
}
}
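// Hedged usage sketch (illustrative only): PseudoAuthenticator appends
// user.name=<value of the 'user.name' system property> to the query string, so the call
// below is roughly equivalent to requesting
// http://localhost:14000/webhdfs/v1/?user.name=<user>. The host and port are hypothetical.
class PseudoAuthenticatorUsageSketch {
  static AuthenticatedURL.Token login() throws IOException, AuthenticationException {
    URL url = new URL("http://localhost:14000/webhdfs/v1/");
    AuthenticatedURL.Token token = new AuthenticatedURL.Token();
    new PseudoAuthenticator().authenticate(url, token);
    return token;
  }
}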
| 3,253 | 34.758242 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/ConnectionConfigurator.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.client;
import java.io.IOException;
import java.net.HttpURLConnection;
/**
* Interface to configure {@link HttpURLConnection} created by
* {@link AuthenticatedURL} instances.
*/
public interface ConnectionConfigurator {
/**
* Configures the given {@link HttpURLConnection} instance.
*
* @param conn the {@link HttpURLConnection} instance to configure.
* @return the configured {@link HttpURLConnection} instance.
*
* @throws IOException if an IO error occurred.
*/
public HttpURLConnection configure(HttpURLConnection conn) throws IOException;
}
| 1,223 | 32.081081 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/FileSignerSecretProvider.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.util;
import com.google.common.base.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
import javax.servlet.ServletContext;
import java.io.*;
import java.nio.charset.Charset;
import java.util.Properties;
/**
* A SignerSecretProvider that simply loads a secret from a specified file.
*/
@InterfaceStability.Unstable
@InterfaceAudience.Private
public class FileSignerSecretProvider extends SignerSecretProvider {
private byte[] secret;
private byte[][] secrets;
public FileSignerSecretProvider() {}
@Override
public void init(Properties config, ServletContext servletContext,
long tokenValidity) throws Exception {
String signatureSecretFile = config.getProperty(
AuthenticationFilter.SIGNATURE_SECRET_FILE, null);
Reader reader = null;
if (signatureSecretFile != null) {
try {
StringBuilder sb = new StringBuilder();
reader = new InputStreamReader(
new FileInputStream(signatureSecretFile), Charsets.UTF_8);
int c = reader.read();
while (c > -1) {
sb.append((char) c);
c = reader.read();
}
secret = sb.toString().getBytes(Charset.forName("UTF-8"));
} catch (IOException ex) {
throw new RuntimeException("Could not read signature secret file: " +
            signatureSecretFile, ex);
} finally {
if (reader != null) {
try {
reader.close();
} catch (IOException e) {
// nothing to do
}
}
}
}
secrets = new byte[][]{secret};
}
@Override
public byte[] getCurrentSecret() {
return secret;
}
@Override
public byte[][] getAllSecrets() {
return secrets;
}
}
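// Hedged usage sketch (illustrative only): the secret file path is hypothetical and, in
// a real deployment, the AuthenticationFilter builds this Properties object and calls
// init() itself.
class FileSignerSecretProviderUsageSketch {
  static byte[] loadSecret() throws Exception {
    Properties config = new Properties();
    config.setProperty(AuthenticationFilter.SIGNATURE_SECRET_FILE,
        "/etc/hadoop/http-auth-signature-secret"); // hypothetical path
    FileSignerSecretProvider provider = new FileSignerSecretProvider();
    provider.init(config, null, -1); // the ServletContext and validity are unused here
    return provider.getCurrentSecret();
  }
}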
| 2,594 | 29.529412 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authentication.util;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.directory.server.kerberos.shared.keytab.Keytab;
import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.Oid;
public class KerberosUtil {
/* Return the Kerberos login module name */
public static String getKrb5LoginModuleName() {
return System.getProperty("java.vendor").contains("IBM")
? "com.ibm.security.auth.module.Krb5LoginModule"
: "com.sun.security.auth.module.Krb5LoginModule";
}
public static Oid getOidInstance(String oidName)
throws ClassNotFoundException, GSSException, NoSuchFieldException,
IllegalAccessException {
Class<?> oidClass;
if (IBM_JAVA) {
if ("NT_GSS_KRB5_PRINCIPAL".equals(oidName)) {
// IBM JDK GSSUtil class does not have field for krb5 principal oid
return new Oid("1.2.840.113554.1.2.2.1");
}
oidClass = Class.forName("com.ibm.security.jgss.GSSUtil");
} else {
oidClass = Class.forName("sun.security.jgss.GSSUtil");
}
Field oidField = oidClass.getDeclaredField(oidName);
return (Oid)oidField.get(oidClass);
}
public static String getDefaultRealm()
throws ClassNotFoundException, NoSuchMethodException,
IllegalArgumentException, IllegalAccessException,
InvocationTargetException {
Object kerbConf;
Class<?> classRef;
Method getInstanceMethod;
Method getDefaultRealmMethod;
if (System.getProperty("java.vendor").contains("IBM")) {
classRef = Class.forName("com.ibm.security.krb5.internal.Config");
} else {
classRef = Class.forName("sun.security.krb5.Config");
}
getInstanceMethod = classRef.getMethod("getInstance", new Class[0]);
kerbConf = getInstanceMethod.invoke(classRef, new Object[0]);
getDefaultRealmMethod = classRef.getDeclaredMethod("getDefaultRealm",
new Class[0]);
return (String)getDefaultRealmMethod.invoke(kerbConf, new Object[0]);
}
/* Return fqdn of the current host */
static String getLocalHostName() throws UnknownHostException {
return InetAddress.getLocalHost().getCanonicalHostName();
}
/**
* Create Kerberos principal for a given service and hostname. It converts
   * hostname to lower case. If hostname is null or "0.0.0.0", it uses the
   * dynamically looked-up FQDN of the current host instead.
*
* @param service
* Service for which you want to generate the principal.
* @param hostname
* Fully-qualified domain name.
* @return Converted Kerberos principal name.
* @throws UnknownHostException
* If no IP address for the local host could be found.
*/
public static final String getServicePrincipal(String service, String hostname)
throws UnknownHostException {
String fqdn = hostname;
if (null == fqdn || fqdn.equals("") || fqdn.equals("0.0.0.0")) {
fqdn = getLocalHostName();
}
// convert hostname to lowercase as kerberos does not work with hostnames
// with uppercase characters.
return service + "/" + fqdn.toLowerCase(Locale.ENGLISH);
}
/**
   * Get all the unique principals present in the keytab file.
*
* @param keytabFileName
* Name of the keytab file to be read.
* @return list of unique principals in the keytab.
* @throws IOException
* If keytab entries cannot be read from the file.
*/
static final String[] getPrincipalNames(String keytabFileName) throws IOException {
Keytab keytab = Keytab.read(new File(keytabFileName));
Set<String> principals = new HashSet<String>();
List<KeytabEntry> entries = keytab.getEntries();
for (KeytabEntry entry: entries){
principals.add(entry.getPrincipalName().replace("\\", "/"));
}
return principals.toArray(new String[0]);
}
/**
   * Get all the unique principals from the keytab file that match a pattern.
*
* @param keytab Name of the keytab file to be read.
* @param pattern pattern to be matched.
* @return list of unique principals which matches the pattern.
* @throws IOException if cannot get the principal name
*/
public static final String[] getPrincipalNames(String keytab,
Pattern pattern) throws IOException {
String[] principals = getPrincipalNames(keytab);
if (principals.length != 0) {
List<String> matchingPrincipals = new ArrayList<String>();
for (String principal : principals) {
if (pattern.matcher(principal).matches()) {
matchingPrincipals.add(principal);
}
}
principals = matchingPrincipals.toArray(new String[0]);
}
return principals;
}
}
| 5,978 | 36.841772 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authentication.util;
import java.io.ByteArrayInputStream;
import java.io.UnsupportedEncodingException;
import java.security.PublicKey;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.security.interfaces.RSAPublicKey;
import javax.servlet.ServletException;
public class CertificateUtil {
private static final String PEM_HEADER = "-----BEGIN CERTIFICATE-----\n";
private static final String PEM_FOOTER = "\n-----END CERTIFICATE-----";
/**
* Gets an RSAPublicKey from the provided PEM encoding.
*
* @param pem
* - the pem encoding from config without the header and footer
* @return RSAPublicKey the RSA public key
* @throws ServletException thrown if a processing error occurred
*/
public static RSAPublicKey parseRSAPublicKey(String pem) throws ServletException {
String fullPem = PEM_HEADER + pem + PEM_FOOTER;
PublicKey key = null;
try {
CertificateFactory fact = CertificateFactory.getInstance("X.509");
ByteArrayInputStream is = new ByteArrayInputStream(
fullPem.getBytes("UTF8"));
X509Certificate cer = (X509Certificate) fact.generateCertificate(is);
key = cer.getPublicKey();
} catch (CertificateException ce) {
String message = null;
if (pem.startsWith(PEM_HEADER)) {
message = "CertificateException - be sure not to include PEM header "
+ "and footer in the PEM configuration element.";
} else {
message = "CertificateException - PEM may be corrupt";
}
throw new ServletException(message, ce);
} catch (UnsupportedEncodingException uee) {
throw new ServletException(uee);
}
return (RSAPublicKey) key;
}
}
| 2,626 | 38.208955 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerException.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.util;
/**
* Exception thrown by {@link Signer} when a string signature is invalid.
*/
public class SignerException extends Exception {
static final long serialVersionUID = 0;
/**
* Creates an exception instance.
*
* @param msg message for the exception.
*/
public SignerException(String msg) {
super(msg);
}
}
| 983 | 29.75 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.util;
import org.apache.commons.codec.binary.Base64;
import java.nio.charset.Charset;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
/**
* Signs strings and verifies signed strings using a SHA digest.
*/
public class Signer {
private static final String SIGNATURE = "&s=";
private SignerSecretProvider secretProvider;
/**
* Creates a Signer instance using the specified SignerSecretProvider. The
* SignerSecretProvider should already be initialized.
*
* @param secretProvider The SignerSecretProvider to use
*/
public Signer(SignerSecretProvider secretProvider) {
if (secretProvider == null) {
throw new IllegalArgumentException("secretProvider cannot be NULL");
}
this.secretProvider = secretProvider;
}
/**
* Returns a signed string.
*
* @param str string to sign.
*
* @return the signed string.
*/
public synchronized String sign(String str) {
if (str == null || str.length() == 0) {
throw new IllegalArgumentException("NULL or empty string to sign");
}
byte[] secret = secretProvider.getCurrentSecret();
String signature = computeSignature(secret, str);
return str + SIGNATURE + signature;
}
/**
* Verifies a signed string and extracts the original string.
*
* @param signedStr the signed string to verify and extract.
*
* @return the extracted original string.
*
* @throws SignerException thrown if the given string is not a signed string or if the signature is invalid.
*/
public String verifyAndExtract(String signedStr) throws SignerException {
int index = signedStr.lastIndexOf(SIGNATURE);
if (index == -1) {
throw new SignerException("Invalid signed text: " + signedStr);
}
String originalSignature = signedStr.substring(index + SIGNATURE.length());
String rawValue = signedStr.substring(0, index);
checkSignatures(rawValue, originalSignature);
return rawValue;
}
/**
   * Returns the signature of a string.
*
* @param secret The secret to use
* @param str string to sign.
*
* @return the signature for the string.
*/
protected String computeSignature(byte[] secret, String str) {
try {
MessageDigest md = MessageDigest.getInstance("SHA");
md.update(str.getBytes(Charset.forName("UTF-8")));
md.update(secret);
byte[] digest = md.digest();
return new Base64(0).encodeToString(digest);
} catch (NoSuchAlgorithmException ex) {
throw new RuntimeException("It should not happen, " + ex.getMessage(), ex);
}
}
protected void checkSignatures(String rawValue, String originalSignature)
throws SignerException {
boolean isValid = false;
byte[][] secrets = secretProvider.getAllSecrets();
for (int i = 0; i < secrets.length; i++) {
byte[] secret = secrets[i];
if (secret != null) {
String currentSignature = computeSignature(secret, rawValue);
if (originalSignature.equals(currentSignature)) {
isValid = true;
break;
}
}
}
if (!isValid) {
throw new SignerException("Invalid signature");
}
}
}
| 3,815 | 31.338983 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/AuthToken.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.util;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import java.security.Principal;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
/**
 * Represents an authentication token, carrying the authenticated user name, principal,
 * authentication mechanism and expiration time.
 */
public class AuthToken implements Principal {
/**
   * Constant used to separate the attributes in the token string representation.
*/
private static final String ATTR_SEPARATOR = "&";
private static final String USER_NAME = "u";
private static final String PRINCIPAL = "p";
private static final String EXPIRES = "e";
private static final String TYPE = "t";
private final static Set<String> ATTRIBUTES =
new HashSet<String>(Arrays.asList(USER_NAME, PRINCIPAL, EXPIRES, TYPE));
private String userName;
private String principal;
private String type;
private long expires;
private String tokenStr;
protected AuthToken() {
userName = null;
principal = null;
type = null;
expires = -1;
tokenStr = "ANONYMOUS";
generateToken();
}
private static final String ILLEGAL_ARG_MSG = " is NULL, empty or contains a '" + ATTR_SEPARATOR + "'";
/**
* Creates an authentication token.
*
* @param userName user name.
   * @param principal principal (commonly matches the user name; with Kerberos it is the
   *                  full/long principal name while the userName is the short name).
   * @param type the authentication mechanism name.
*/
public AuthToken(String userName, String principal, String type) {
checkForIllegalArgument(userName, "userName");
checkForIllegalArgument(principal, "principal");
checkForIllegalArgument(type, "type");
this.userName = userName;
this.principal = principal;
this.type = type;
this.expires = -1;
}
/**
   * Check if the provided value is invalid. Throws an IllegalArgumentException if it is
   * invalid; does nothing otherwise.
*
* @param value the value to check.
* @param name the parameter name to use in an error message if the value is invalid.
*/
protected static void checkForIllegalArgument(String value, String name) {
if (value == null || value.length() == 0 || value.contains(ATTR_SEPARATOR)) {
throw new IllegalArgumentException(name + ILLEGAL_ARG_MSG);
}
}
/**
* Sets the expiration of the token.
*
* @param expires expiration time of the token in milliseconds since the epoch.
*/
public void setExpires(long expires) {
this.expires = expires;
generateToken();
}
/**
* Returns true if the token has expired.
*
* @return true if the token has expired.
*/
public boolean isExpired() {
return getExpires() != -1 && System.currentTimeMillis() > getExpires();
}
/**
* Generates the token.
*/
private void generateToken() {
StringBuffer sb = new StringBuffer();
sb.append(USER_NAME).append("=").append(getUserName()).append(ATTR_SEPARATOR);
sb.append(PRINCIPAL).append("=").append(getName()).append(ATTR_SEPARATOR);
sb.append(TYPE).append("=").append(getType()).append(ATTR_SEPARATOR);
sb.append(EXPIRES).append("=").append(getExpires());
tokenStr = sb.toString();
}
/**
* Returns the user name.
*
* @return the user name.
*/
public String getUserName() {
return userName;
}
/**
* Returns the principal name (this method name comes from the JDK {@link Principal} interface).
*
* @return the principal name.
*/
@Override
public String getName() {
return principal;
}
/**
* Returns the authentication mechanism of the token.
*
* @return the authentication mechanism of the token.
*/
public String getType() {
return type;
}
/**
* Returns the expiration time of the token.
*
   * @return the expiration time of the token, in milliseconds since the epoch.
*/
public long getExpires() {
return expires;
}
/**
* Returns the string representation of the token.
* <p>
* This string representation is parseable by the {@link #parse} method.
*
* @return the string representation of the token.
*/
@Override
public String toString() {
return tokenStr;
}
public static AuthToken parse(String tokenStr) throws AuthenticationException {
if (tokenStr.length() >= 2) {
// strip the \" at the two ends of the tokenStr
if (tokenStr.charAt(0) == '\"' &&
tokenStr.charAt(tokenStr.length()-1) == '\"') {
tokenStr = tokenStr.substring(1, tokenStr.length()-1);
}
}
Map<String, String> map = split(tokenStr);
// remove the signature part, since client doesn't care about it
map.remove("s");
if (!map.keySet().equals(ATTRIBUTES)) {
throw new AuthenticationException("Invalid token string, missing attributes");
}
long expires = Long.parseLong(map.get(EXPIRES));
AuthToken token = new AuthToken(map.get(USER_NAME), map.get(PRINCIPAL), map.get(TYPE));
token.setExpires(expires);
return token;
}
/**
* Splits the string representation of a token into attributes pairs.
*
* @param tokenStr string representation of a token.
*
* @return a map with the attribute pairs of the token.
*
* @throws AuthenticationException thrown if the string representation of the token could not be broken into
* attribute pairs.
*/
private static Map<String, String> split(String tokenStr) throws AuthenticationException {
Map<String, String> map = new HashMap<String, String>();
StringTokenizer st = new StringTokenizer(tokenStr, ATTR_SEPARATOR);
while (st.hasMoreTokens()) {
String part = st.nextToken();
int separator = part.indexOf('=');
if (separator == -1) {
throw new AuthenticationException("Invalid authentication token");
}
String key = part.substring(0, separator);
String value = part.substring(separator + 1);
map.put(key, value);
}
return map;
}
}
| 6,610 | 29.187215 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.util;
import com.google.common.annotations.VisibleForTesting;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.servlet.ServletContext;
import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.ACLProvider;
import org.apache.curator.framework.imps.DefaultACLProvider;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs.Perms;
import org.apache.zookeeper.client.ZooKeeperSaslClient;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A SignerSecretProvider that synchronizes a rolling random secret between
* multiple servers using ZooKeeper.
* <p>
* It works by storing the secrets and next rollover time in a ZooKeeper znode.
* All ZKSignerSecretProviders looking at that znode will use those
* secrets and next rollover time to ensure they are synchronized. There is no
* "leader" -- any of the ZKSignerSecretProviders can choose the next secret;
* which one is indeterminate. Kerberos-based ACLs can also be enforced to
* prevent a malicious third-party from getting or setting the secrets. It uses
* its own CuratorFramework client for talking to ZooKeeper. If you want to use
* your own Curator client, you can pass it to ZKSignerSecretProvider; see
* {@link org.apache.hadoop.security.authentication.server.AuthenticationFilter}
* for more details.
* <p>
* The supported configuration properties are:
* <ul>
* <li>signer.secret.provider.zookeeper.connection.string: indicates the
* ZooKeeper connection string to connect with.</li>
* <li>signer.secret.provider.zookeeper.path: indicates the ZooKeeper path
* to use for storing and retrieving the secrets. All ZKSignerSecretProviders
* that need to coordinate should point to the same path.</li>
* <li>signer.secret.provider.zookeeper.auth.type: indicates the auth type to
* use. Supported values are "none" and "sasl". The default value is "none"
* </li>
* <li>signer.secret.provider.zookeeper.kerberos.keytab: set this to the path
* with the Kerberos keytab file. This is only required if using Kerberos.</li>
* <li>signer.secret.provider.zookeeper.kerberos.principal: set this to the
 * Kerberos principal to use. This is only required if using Kerberos.</li>
 * <li>signer.secret.provider.zookeeper.disconnect.on.shutdown: when set to "true",
* ZKSignerSecretProvider will close the ZooKeeper connection on shutdown. The
* default is "true". Only set this to "false" if a custom Curator client is
* being provided and the disconnection is being handled elsewhere.</li>
* </ul>
*
* The following attribute in the ServletContext can also be set if desired:
* <ul>
* <li>signer.secret.provider.zookeeper.curator.client: A CuratorFramework
* client object can be passed here. If given, the "zookeeper" implementation
* will use this Curator client instead of creating its own, which is useful if
* you already have a Curator client or want more control over its
* configuration.</li>
* </ul>
*/
@InterfaceStability.Unstable
@InterfaceAudience.Private
public class ZKSignerSecretProvider extends RolloverSignerSecretProvider {
private static final String CONFIG_PREFIX =
"signer.secret.provider.zookeeper.";
/**
* Constant for the property that specifies the ZooKeeper connection string.
*/
public static final String ZOOKEEPER_CONNECTION_STRING =
CONFIG_PREFIX + "connection.string";
/**
* Constant for the property that specifies the ZooKeeper path.
*/
public static final String ZOOKEEPER_PATH = CONFIG_PREFIX + "path";
/**
* Constant for the property that specifies the auth type to use. Supported
* values are "none" and "sasl". The default value is "none".
*/
public static final String ZOOKEEPER_AUTH_TYPE = CONFIG_PREFIX + "auth.type";
/**
* Constant for the property that specifies the Kerberos keytab file.
*/
public static final String ZOOKEEPER_KERBEROS_KEYTAB =
CONFIG_PREFIX + "kerberos.keytab";
/**
* Constant for the property that specifies the Kerberos principal.
*/
public static final String ZOOKEEPER_KERBEROS_PRINCIPAL =
CONFIG_PREFIX + "kerberos.principal";
/**
* Constant for the property that specifies whether or not the Curator client
* should disconnect from ZooKeeper on shutdown. The default is "true". Only
* set this to "false" if a custom Curator client is being provided and the
* disconnection is being handled elsewhere.
*/
public static final String DISCONNECT_FROM_ZOOKEEPER_ON_SHUTDOWN =
CONFIG_PREFIX + "disconnect.on.shutdown";
/**
* Constant for the ServletContext attribute that can be used for providing a
* custom CuratorFramework client. If set ZKSignerSecretProvider will use this
* Curator client instead of creating a new one. The providing class is
* responsible for creating and configuring the Curator client (including
* security and ACLs) in this case.
*/
public static final String
ZOOKEEPER_SIGNER_SECRET_PROVIDER_CURATOR_CLIENT_ATTRIBUTE =
CONFIG_PREFIX + "curator.client";
private static final String JAAS_LOGIN_ENTRY_NAME =
"ZKSignerSecretProviderClient";
private static Logger LOG = LoggerFactory.getLogger(
ZKSignerSecretProvider.class);
private String path;
/**
* Stores the next secret that will be used after the current one rolls over.
* We do this to help with rollover performance by actually deciding the next
* secret at the previous rollover. This allows us to switch to the next
* secret very quickly. Afterwards, we have plenty of time to decide on the
* next secret.
*/
private volatile byte[] nextSecret;
private final Random rand;
/**
* Stores the current version of the znode.
*/
private int zkVersion;
/**
* Stores the next date that the rollover will occur. This is only used
* for allowing new servers joining later to synchronize their rollover
* with everyone else.
*/
private long nextRolloverDate;
private long tokenValidity;
private CuratorFramework client;
private boolean shouldDisconnect;
private static int INT_BYTES = Integer.SIZE / Byte.SIZE;
private static int LONG_BYTES = Long.SIZE / Byte.SIZE;
private static int DATA_VERSION = 0;
public ZKSignerSecretProvider() {
super();
rand = new Random();
}
/**
* This constructor lets you set the seed of the Random Number Generator and
* is meant for testing.
* @param seed the seed for the random number generator
*/
@VisibleForTesting
public ZKSignerSecretProvider(long seed) {
super();
rand = new Random(seed);
}
@Override
public void init(Properties config, ServletContext servletContext,
long tokenValidity) throws Exception {
Object curatorClientObj = servletContext.getAttribute(
ZOOKEEPER_SIGNER_SECRET_PROVIDER_CURATOR_CLIENT_ATTRIBUTE);
if (curatorClientObj != null
&& curatorClientObj instanceof CuratorFramework) {
client = (CuratorFramework) curatorClientObj;
} else {
client = createCuratorClient(config);
servletContext.setAttribute(
ZOOKEEPER_SIGNER_SECRET_PROVIDER_CURATOR_CLIENT_ATTRIBUTE, client);
}
this.tokenValidity = tokenValidity;
shouldDisconnect = Boolean.parseBoolean(
config.getProperty(DISCONNECT_FROM_ZOOKEEPER_ON_SHUTDOWN, "true"));
path = config.getProperty(ZOOKEEPER_PATH);
if (path == null) {
throw new IllegalArgumentException(ZOOKEEPER_PATH
+ " must be specified");
}
try {
nextRolloverDate = System.currentTimeMillis() + tokenValidity;
// everyone tries to do this, only one will succeed and only when the
// znode doesn't already exist. Everyone else will synchronize on the
// data from the znode
client.create().creatingParentsIfNeeded()
.forPath(path, generateZKData(generateRandomSecret(),
generateRandomSecret(), null));
zkVersion = 0;
LOG.info("Creating secret znode");
} catch (KeeperException.NodeExistsException nee) {
LOG.info("The secret znode already exists, retrieving data");
}
// Synchronize on the data from the znode
// passing true tells it to parse out all the data for initing
pullFromZK(true);
long initialDelay = nextRolloverDate - System.currentTimeMillis();
// If it's in the past, try to find the next interval that we should
// be using
    if (initialDelay < 1L) {
      int i = 1;
      while (initialDelay < 1L) {
initialDelay = nextRolloverDate + tokenValidity * i
- System.currentTimeMillis();
i++;
}
}
super.startScheduler(initialDelay, tokenValidity);
}
/**
* Disconnects from ZooKeeper unless told not to.
*/
@Override
public void destroy() {
if (shouldDisconnect && client != null) {
client.close();
}
super.destroy();
}
@Override
protected synchronized void rollSecret() {
super.rollSecret();
// Try to push the information to ZooKeeper with a potential next secret.
nextRolloverDate += tokenValidity;
byte[][] secrets = super.getAllSecrets();
pushToZK(generateRandomSecret(), secrets[0], secrets[1]);
// Pull info from ZooKeeper to get the decided next secret
// passing false tells it that we don't care about most of the data
pullFromZK(false);
}
@Override
protected byte[] generateNewSecret() {
// We simply return nextSecret because it's already been decided on
return nextSecret;
}
/**
* Pushes proposed data to ZooKeeper. If a different server pushes its data
* first, it gives up.
* @param newSecret The new secret to use
* @param currentSecret The current secret
* @param previousSecret The previous secret
*/
private synchronized void pushToZK(byte[] newSecret, byte[] currentSecret,
byte[] previousSecret) {
byte[] bytes = generateZKData(newSecret, currentSecret, previousSecret);
try {
client.setData().withVersion(zkVersion).forPath(path, bytes);
} catch (KeeperException.BadVersionException bve) {
LOG.debug("Unable to push to znode; another server already did it");
} catch (Exception ex) {
LOG.error("An unexpected exception occured pushing data to ZooKeeper",
ex);
}
}
/**
* Serialize the data to attempt to push into ZooKeeper. The format is this:
* <p>
* [DATA_VERSION, newSecretLength, newSecret, currentSecretLength, currentSecret, previousSecretLength, previousSecret, nextRolloverDate]
* <p>
* Only previousSecret can be null, in which case the format looks like this:
* <p>
* [DATA_VERSION, newSecretLength, newSecret, currentSecretLength, currentSecret, 0, nextRolloverDate]
* <p>
* @param newSecret The new secret to use
* @param currentSecret The current secret
* @param previousSecret The previous secret
* @return The serialized data for ZooKeeper
*/
private synchronized byte[] generateZKData(byte[] newSecret,
byte[] currentSecret, byte[] previousSecret) {
int newSecretLength = newSecret.length;
int currentSecretLength = currentSecret.length;
int previousSecretLength = 0;
if (previousSecret != null) {
previousSecretLength = previousSecret.length;
}
ByteBuffer bb = ByteBuffer.allocate(INT_BYTES + INT_BYTES + newSecretLength
+ INT_BYTES + currentSecretLength + INT_BYTES + previousSecretLength
+ LONG_BYTES);
bb.putInt(DATA_VERSION);
bb.putInt(newSecretLength);
bb.put(newSecret);
bb.putInt(currentSecretLength);
bb.put(currentSecret);
bb.putInt(previousSecretLength);
if (previousSecretLength > 0) {
bb.put(previousSecret);
}
bb.putLong(nextRolloverDate);
return bb.array();
}
/**
* Pulls data from ZooKeeper. If isInit is false, it will only parse the
* next secret and version. If isInit is true, it will also parse the current
* and previous secrets, and the next rollover date; it will also init the
* secrets. Hence, isInit should only be true on startup.
* @param isInit see description above
*/
private synchronized void pullFromZK(boolean isInit) {
try {
Stat stat = new Stat();
byte[] bytes = client.getData().storingStatIn(stat).forPath(path);
ByteBuffer bb = ByteBuffer.wrap(bytes);
int dataVersion = bb.getInt();
if (dataVersion > DATA_VERSION) {
throw new IllegalStateException("Cannot load data from ZooKeeper; it"
+ "was written with a newer version");
}
int nextSecretLength = bb.getInt();
byte[] nextSecret = new byte[nextSecretLength];
bb.get(nextSecret);
this.nextSecret = nextSecret;
zkVersion = stat.getVersion();
if (isInit) {
int currentSecretLength = bb.getInt();
byte[] currentSecret = new byte[currentSecretLength];
bb.get(currentSecret);
int previousSecretLength = bb.getInt();
byte[] previousSecret = null;
if (previousSecretLength > 0) {
previousSecret = new byte[previousSecretLength];
bb.get(previousSecret);
}
super.initSecrets(currentSecret, previousSecret);
nextRolloverDate = bb.getLong();
}
} catch (Exception ex) {
LOG.error("An unexpected exception occurred while pulling data from"
+ "ZooKeeper", ex);
}
}
private byte[] generateRandomSecret() {
return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
}
/**
* This method creates the Curator client and connects to ZooKeeper.
* @param config configuration properties
* @return A Curator client
* @throws Exception thrown if an error occurred
*/
protected CuratorFramework createCuratorClient(Properties config)
throws Exception {
String connectionString = config.getProperty(
ZOOKEEPER_CONNECTION_STRING, "localhost:2181");
RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
ACLProvider aclProvider;
String authType = config.getProperty(ZOOKEEPER_AUTH_TYPE, "none");
if (authType.equals("sasl")) {
LOG.info("Connecting to ZooKeeper with SASL/Kerberos"
+ "and using 'sasl' ACLs");
String principal = setJaasConfiguration(config);
System.setProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY,
JAAS_LOGIN_ENTRY_NAME);
System.setProperty("zookeeper.authProvider.1",
"org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
aclProvider = new SASLOwnerACLProvider(principal);
} else { // "none"
LOG.info("Connecting to ZooKeeper without authentication");
aclProvider = new DefaultACLProvider(); // open to everyone
}
CuratorFramework cf = CuratorFrameworkFactory.builder()
.connectString(connectionString)
.retryPolicy(retryPolicy)
.aclProvider(aclProvider)
.build();
cf.start();
return cf;
}
private String setJaasConfiguration(Properties config) throws Exception {
    // check for null before trimming so a missing property produces the intended
    // IllegalArgumentException instead of a NullPointerException
    String keytabFile = config.getProperty(ZOOKEEPER_KERBEROS_KEYTAB);
    if (keytabFile == null || keytabFile.trim().length() == 0) {
      throw new IllegalArgumentException(ZOOKEEPER_KERBEROS_KEYTAB
          + " must be specified");
    }
    keytabFile = keytabFile.trim();
    String principal = config.getProperty(ZOOKEEPER_KERBEROS_PRINCIPAL);
    if (principal == null || principal.trim().length() == 0) {
      throw new IllegalArgumentException(ZOOKEEPER_KERBEROS_PRINCIPAL
          + " must be specified");
    }
    principal = principal.trim();
// This is equivalent to writing a jaas.conf file and setting the system
// property, "java.security.auth.login.config", to point to it
JaasConfiguration jConf =
new JaasConfiguration(JAAS_LOGIN_ENTRY_NAME, principal, keytabFile);
Configuration.setConfiguration(jConf);
return principal.split("[/@]")[0];
}
/**
* Simple implementation of an {@link ACLProvider} that simply returns an ACL
* that gives all permissions only to a single principal.
*/
private static class SASLOwnerACLProvider implements ACLProvider {
private final List<ACL> saslACL;
private SASLOwnerACLProvider(String principal) {
this.saslACL = Collections.singletonList(
new ACL(Perms.ALL, new Id("sasl", principal)));
}
@Override
public List<ACL> getDefaultAcl() {
return saslACL;
}
@Override
public List<ACL> getAclForPath(String path) {
return saslACL;
}
}
/**
* Creates a programmatic version of a jaas.conf file. This can be used
* instead of writing a jaas.conf file and setting the system property,
* "java.security.auth.login.config", to point to that file. It is meant to be
* used for connecting to ZooKeeper.
*/
@InterfaceAudience.Private
public static class JaasConfiguration extends Configuration {
    private AppConfigurationEntry[] entry;
private String entryName;
/**
* Add an entry to the jaas configuration with the passed in name,
* principal, and keytab. The other necessary options will be set for you.
*
* @param entryName The name of the entry (e.g. "Client")
* @param principal The principal of the user
* @param keytab The location of the keytab
*/
public JaasConfiguration(String entryName, String principal, String keytab) {
this.entryName = entryName;
Map<String, String> options = new HashMap<String, String>();
options.put("keyTab", keytab);
options.put("principal", principal);
options.put("useKeyTab", "true");
options.put("storeKey", "true");
options.put("useTicketCache", "false");
options.put("refreshKrb5Config", "true");
String jaasEnvVar = System.getenv("HADOOP_JAAS_DEBUG");
if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar)) {
options.put("debug", "true");
}
entry = new AppConfigurationEntry[]{
new AppConfigurationEntry(getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options)};
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
return (entryName.equals(name)) ? entry : null;
}
private String getKrb5LoginModuleName() {
String krb5LoginModuleName;
if (System.getProperty("java.vendor").contains("IBM")) {
krb5LoginModuleName = "com.ibm.security.auth.module.Krb5LoginModule";
} else {
krb5LoginModuleName = "com.sun.security.auth.module.Krb5LoginModule";
}
return krb5LoginModuleName;
}
}
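  // Illustrative only (not part of the original source): for entry name
  // "Client" on an Oracle/OpenJDK JVM, the JaasConfiguration above stands in
  // for a jaas.conf file roughly like the following; the principal and keytab
  // values are placeholders, not working values.
  //
  //   Client {
  //     com.sun.security.auth.module.Krb5LoginModule required
  //       useKeyTab=true
  //       storeKey=true
  //       useTicketCache=false
  //       refreshKrb5Config=true
  //       keyTab="/etc/security/keytabs/zk.service.keytab"
  //       principal="zookeeper/[email protected]";
  //   };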
}
| 20,049 | 38.160156 | 139 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.util;
import com.google.common.annotations.VisibleForTesting;
import java.nio.charset.Charset;
import java.util.Random;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A SignerSecretProvider that uses a random number as its secret. It rolls
* the secret at a regular interval.
*/
@InterfaceStability.Unstable
@InterfaceAudience.Private
public class RandomSignerSecretProvider extends RolloverSignerSecretProvider {
private final Random rand;
public RandomSignerSecretProvider() {
super();
rand = new Random();
}
/**
* This constructor lets you set the seed of the Random Number Generator and
* is meant for testing.
* @param seed the seed for the random number generator
*/
@VisibleForTesting
public RandomSignerSecretProvider(long seed) {
super();
rand = new Random(seed);
}
@Override
protected byte[] generateNewSecret() {
return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
}
}
| 1,685 | 30.222222 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.util;
import java.util.Properties;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.servlet.ServletContext;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An abstract SignerSecretProvider that can be used as the base for a
* rolling secret. The secret will roll over at the same interval as the token
* validity, so there are only ever a maximum of two valid secrets at any
* given time. This class handles storing and returning the secrets, as well
* as the rolling over. At a minimum, subclasses simply need to implement the
* generateNewSecret() method. More advanced implementations can override
* other methods to provide more advanced behavior, but should be careful when
* doing so.
*/
@InterfaceStability.Unstable
@InterfaceAudience.Private
public abstract class RolloverSignerSecretProvider
extends SignerSecretProvider {
private static Logger LOG = LoggerFactory.getLogger(
RolloverSignerSecretProvider.class);
/**
* Stores the currently valid secrets. The current secret is the 0th element
* in the array.
*/
private volatile byte[][] secrets;
private ScheduledExecutorService scheduler;
private boolean schedulerRunning;
private boolean isDestroyed;
public RolloverSignerSecretProvider() {
schedulerRunning = false;
isDestroyed = false;
}
/**
* Initialize the SignerSecretProvider. It initializes the current secret
* and starts the scheduler for the rollover to run at an interval of
* tokenValidity.
* @param config configuration properties
* @param servletContext servlet context
* @param tokenValidity The amount of time a token is valid for
* @throws Exception thrown if an error occurred
*/
@Override
public void init(Properties config, ServletContext servletContext,
long tokenValidity) throws Exception {
initSecrets(generateNewSecret(), null);
startScheduler(tokenValidity, tokenValidity);
}
/**
* Initializes the secrets array. This should typically be called only once,
   * during init, but some implementations may wish to call it at other times.
* previousSecret can be null if there isn't a previous secret, but
* currentSecret should never be null.
* @param currentSecret The current secret
* @param previousSecret The previous secret
*/
protected void initSecrets(byte[] currentSecret, byte[] previousSecret) {
secrets = new byte[][]{currentSecret, previousSecret};
}
/**
* Starts the scheduler for the rollover to run at an interval.
* @param initialDelay The initial delay in the rollover in milliseconds
* @param period The interval for the rollover in milliseconds
*/
protected synchronized void startScheduler(long initialDelay, long period) {
if (!schedulerRunning) {
schedulerRunning = true;
scheduler = Executors.newSingleThreadScheduledExecutor();
scheduler.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
rollSecret();
}
}, initialDelay, period, TimeUnit.MILLISECONDS);
}
}
@Override
public synchronized void destroy() {
if (!isDestroyed) {
isDestroyed = true;
if (scheduler != null) {
scheduler.shutdown();
}
schedulerRunning = false;
super.destroy();
}
}
/**
* Rolls the secret. It is called automatically at the rollover interval.
*/
protected synchronized void rollSecret() {
if (!isDestroyed) {
LOG.debug("rolling secret");
byte[] newSecret = generateNewSecret();
secrets = new byte[][]{newSecret, secrets[0]};
}
}
/**
* Subclasses should implement this to return a new secret. It will be called
* automatically at the secret rollover interval. It should never return null.
* @return a new secret
*/
protected abstract byte[] generateNewSecret();
@Override
public byte[] getCurrentSecret() {
return secrets[0];
}
@Override
public byte[][] getAllSecrets() {
return secrets;
}
}
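// Illustrative sketch (not part of the original source): the smallest useful
// subclass. Only generateNewSecret() has to be supplied; secret storage,
// rollover scheduling and shutdown are inherited from
// RolloverSignerSecretProvider. The UUID-based secret is an arbitrary choice
// made for this example.
class ExampleUuidSignerSecretProvider extends RolloverSignerSecretProvider {
  @Override
  protected byte[] generateNewSecret() {
    // Each rollover produces a fresh, unpredictable-enough string secret.
    return java.util.UUID.randomUUID().toString()
        .getBytes(java.nio.charset.Charset.forName("UTF-8"));
  }
}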
| 4,871 | 33.06993 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
|
package org.apache.hadoop.security.authentication.util;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class implements parsing and handling of Kerberos principal names. In
* particular, it splits them apart and translates them down into local
* operating system names.
*/
@SuppressWarnings("all")
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class KerberosName {
private static final Logger LOG = LoggerFactory.getLogger(KerberosName.class);
/** The first component of the name */
private final String serviceName;
/** The second component of the name. It may be null. */
private final String hostName;
/** The realm of the name. */
private final String realm;
/**
* A pattern that matches a Kerberos name with at most 2 components.
*/
private static final Pattern nameParser =
Pattern.compile("([^/@]*)(/([^/@]*))?@([^/@]*)");
/**
   * A pattern that matches a string without '$' and then a single
* parameter with $n.
*/
private static Pattern parameterPattern =
Pattern.compile("([^$]*)(\\$(\\d*))?");
/**
   * A pattern for parsing an auth_to_local rule.
*/
private static final Pattern ruleParser =
Pattern.compile("\\s*((DEFAULT)|(RULE:\\[(\\d*):([^\\]]*)](\\(([^)]*)\\))?"+
"(s/([^/]*)/([^/]*)/(g)?)?))/?(L)?");
/**
* A pattern that recognizes simple/non-simple names.
*/
private static final Pattern nonSimplePattern = Pattern.compile("[/@]");
/**
* The list of translation rules.
*/
private static List<Rule> rules;
private static String defaultRealm;
static {
try {
defaultRealm = KerberosUtil.getDefaultRealm();
} catch (Exception ke) {
LOG.debug("Kerberos krb5 configuration not found, setting default realm to empty");
defaultRealm="";
}
}
/**
* Create a name from the full Kerberos principal name.
* @param name full Kerberos principal name.
*/
public KerberosName(String name) {
Matcher match = nameParser.matcher(name);
if (!match.matches()) {
if (name.contains("@")) {
throw new IllegalArgumentException("Malformed Kerberos name: " + name);
} else {
serviceName = name;
hostName = null;
realm = null;
}
} else {
serviceName = match.group(1);
hostName = match.group(3);
realm = match.group(4);
}
}
/**
* Get the configured default realm.
* @return the default realm from the krb5.conf
*/
public String getDefaultRealm() {
return defaultRealm;
}
/**
* Put the name back together from the parts.
*/
@Override
public String toString() {
StringBuilder result = new StringBuilder();
result.append(serviceName);
if (hostName != null) {
result.append('/');
result.append(hostName);
}
if (realm != null) {
result.append('@');
result.append(realm);
}
return result.toString();
}
/**
* Get the first component of the name.
* @return the first section of the Kerberos principal name
*/
public String getServiceName() {
return serviceName;
}
/**
* Get the second component of the name.
* @return the second section of the Kerberos principal name, and may be null
*/
public String getHostName() {
return hostName;
}
/**
* Get the realm of the name.
* @return the realm of the name, may be null
*/
public String getRealm() {
return realm;
}
/**
* An encoding of a rule for translating kerberos names.
*/
private static class Rule {
private final boolean isDefault;
private final int numOfComponents;
private final String format;
private final Pattern match;
private final Pattern fromPattern;
private final String toPattern;
private final boolean repeat;
private final boolean toLowerCase;
Rule() {
isDefault = true;
numOfComponents = 0;
format = null;
match = null;
fromPattern = null;
toPattern = null;
repeat = false;
toLowerCase = false;
}
Rule(int numOfComponents, String format, String match, String fromPattern,
String toPattern, boolean repeat, boolean toLowerCase) {
isDefault = false;
this.numOfComponents = numOfComponents;
this.format = format;
this.match = match == null ? null : Pattern.compile(match);
this.fromPattern =
fromPattern == null ? null : Pattern.compile(fromPattern);
this.toPattern = toPattern;
this.repeat = repeat;
this.toLowerCase = toLowerCase;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder();
if (isDefault) {
buf.append("DEFAULT");
} else {
buf.append("RULE:[");
buf.append(numOfComponents);
buf.append(':');
buf.append(format);
buf.append(']');
if (match != null) {
buf.append('(');
buf.append(match);
buf.append(')');
}
if (fromPattern != null) {
buf.append("s/");
buf.append(fromPattern);
buf.append('/');
buf.append(toPattern);
buf.append('/');
if (repeat) {
buf.append('g');
}
}
if (toLowerCase) {
buf.append("/L");
}
}
return buf.toString();
}
/**
* Replace the numbered parameters of the form $n where n is from 1 to
* the length of params. Normal text is copied directly and $n is replaced
* by the corresponding parameter.
     * @param format the format string containing the parameter references to replace
* @param params the list of parameters
* @return the generated string with the parameter references replaced.
* @throws BadFormatString
*/
static String replaceParameters(String format,
String[] params) throws BadFormatString {
Matcher match = parameterPattern.matcher(format);
int start = 0;
StringBuilder result = new StringBuilder();
while (start < format.length() && match.find(start)) {
result.append(match.group(1));
String paramNum = match.group(3);
if (paramNum != null) {
try {
int num = Integer.parseInt(paramNum);
          if (num < 0 || num >= params.length) {
throw new BadFormatString("index " + num + " from " + format +
" is outside of the valid range 0 to " +
(params.length - 1));
}
result.append(params[num]);
} catch (NumberFormatException nfe) {
throw new BadFormatString("bad format in username mapping in " +
paramNum, nfe);
}
}
start = match.end();
}
return result.toString();
}
/**
* Replace the matches of the from pattern in the base string with the value
* of the to string.
* @param base the string to transform
* @param from the pattern to look for in the base string
* @param to the string to replace matches of the pattern with
* @param repeat whether the substitution should be repeated
     * @return the base string with the substitutions applied
*/
static String replaceSubstitution(String base, Pattern from, String to,
boolean repeat) {
Matcher match = from.matcher(base);
if (repeat) {
return match.replaceAll(to);
} else {
return match.replaceFirst(to);
}
}
/**
* Try to apply this rule to the given name represented as a parameter
* array.
* @param params first element is the realm, second and later elements are
     *   the components of the name "a/b@FOO" -> {"FOO", "a", "b"}
* @return the short name if this rule applies or null
* @throws IOException throws if something is wrong with the rules
*/
String apply(String[] params) throws IOException {
String result = null;
if (isDefault) {
if (defaultRealm.equals(params[0])) {
result = params[1];
}
} else if (params.length - 1 == numOfComponents) {
String base = replaceParameters(format, params);
if (match == null || match.matcher(base).matches()) {
if (fromPattern == null) {
result = base;
} else {
result = replaceSubstitution(base, fromPattern, toPattern, repeat);
}
}
}
if (result != null && nonSimplePattern.matcher(result).find()) {
throw new NoMatchingRule("Non-simple name " + result +
" after auth_to_local rule " + this);
}
if (toLowerCase && result != null) {
result = result.toLowerCase(Locale.ENGLISH);
}
return result;
}
}
static List<Rule> parseRules(String rules) {
List<Rule> result = new ArrayList<Rule>();
String remaining = rules.trim();
while (remaining.length() > 0) {
Matcher matcher = ruleParser.matcher(remaining);
if (!matcher.lookingAt()) {
throw new IllegalArgumentException("Invalid rule: " + remaining);
}
if (matcher.group(2) != null) {
result.add(new Rule());
} else {
result.add(new Rule(Integer.parseInt(matcher.group(4)),
matcher.group(5),
matcher.group(7),
matcher.group(9),
matcher.group(10),
"g".equals(matcher.group(11)),
"L".equals(matcher.group(12))));
}
remaining = remaining.substring(matcher.end());
}
return result;
}
@SuppressWarnings("serial")
public static class BadFormatString extends IOException {
BadFormatString(String msg) {
super(msg);
}
BadFormatString(String msg, Throwable err) {
super(msg, err);
}
}
@SuppressWarnings("serial")
public static class NoMatchingRule extends IOException {
NoMatchingRule(String msg) {
super(msg);
}
}
/**
* Get the translation of the principal name into an operating system
* user name.
* @return the short name
* @throws IOException throws if something is wrong with the rules
*/
public String getShortName() throws IOException {
String[] params;
if (hostName == null) {
// if it is already simple, just return it
if (realm == null) {
return serviceName;
}
params = new String[]{realm, serviceName};
} else {
params = new String[]{realm, serviceName, hostName};
}
for(Rule r: rules) {
String result = r.apply(params);
if (result != null) {
return result;
}
}
throw new NoMatchingRule("No rules applied to " + toString());
}
/**
* Set the rules.
* @param ruleString the rules string.
*/
public static void setRules(String ruleString) {
rules = (ruleString != null) ? parseRules(ruleString) : null;
}
/**
* Get the rules.
* @return String of configured rules, or null if not yet configured
*/
public static String getRules() {
String ruleString = null;
if (rules != null) {
StringBuilder sb = new StringBuilder();
for (Rule rule : rules) {
sb.append(rule.toString()).append("\n");
}
ruleString = sb.toString().trim();
}
return ruleString;
}
/**
* Indicates if the name rules have been set.
*
* @return if the name rules have been set.
*/
public static boolean hasRulesBeenSet() {
return rules != null;
}
static void printRules() throws IOException {
int i = 0;
for(Rule r: rules) {
System.out.println(++i + " " + r);
}
}
}
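// Illustrative sketch (not part of the original source): exercising the
// auth_to_local rule engine. The rule string and principal below are made up
// for the example; in Hadoop deployments the rules normally come from the
// hadoop.security.auth_to_local configuration property.
class KerberosNameExample {
  public static void main(String[] args) throws IOException {
    // The first rule maps the two-component hdfs service principal to "hdfs";
    // everything else falls through to the DEFAULT rule.
    KerberosName.setRules(
        "RULE:[2:$1@$0](hdfs@EXAMPLE.COM)s/.*/hdfs/\nDEFAULT");
    KerberosName name = new KerberosName("hdfs/[email protected]");
    System.out.println(name.getShortName());   // prints "hdfs"
  }
}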
| 12,944 | 28.896074 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerSecretProvider.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.util;
import java.util.Properties;
import javax.servlet.ServletContext;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* The SignerSecretProvider is an abstract way to provide a secret to be used
* by the Signer so that we can have different implementations that potentially
* do more complicated things in the backend.
* See the RolloverSignerSecretProvider class for an implementation that
* supports rolling over the secret at a regular interval.
*/
@InterfaceStability.Unstable
@InterfaceAudience.Private
public abstract class SignerSecretProvider {
/**
* Initialize the SignerSecretProvider
* @param config configuration properties
* @param servletContext servlet context
* @param tokenValidity The amount of time a token is valid for
* @throws Exception thrown if an error occurred
*/
public abstract void init(Properties config, ServletContext servletContext,
long tokenValidity) throws Exception;
/**
* Will be called on shutdown; subclasses should perform any cleanup here.
*/
public void destroy() {}
/**
* Returns the current secret to be used by the Signer for signing new
* cookies. This should never return null.
* <p>
* Callers should be careful not to modify the returned value.
* @return the current secret
*/
public abstract byte[] getCurrentSecret();
/**
* Returns all secrets that a cookie could have been signed with and are still
* valid; this should include the secret returned by getCurrentSecret().
* <p>
* Callers should be careful not to modify the returned value.
* @return the secrets
*/
public abstract byte[][] getAllSecrets();
}
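// Illustrative sketch (not part of the original source): how a consumer of
// this interface is expected to use the two accessors. New signatures are
// produced with getCurrentSecret(); verification tries every entry from
// getAllSecrets() so that cookies signed just before a rollover remain valid.
// computeSignature() below is a hypothetical stand-in for the real MAC/digest
// computation performed by the Signer class.
class SignerSecretProviderUsageSketch {
  static boolean verifies(SignerSecretProvider provider, String content,
      String candidateSignature) {
    for (byte[] secret : provider.getAllSecrets()) {
      // Entries may be null (e.g. no previous secret yet) and must be skipped.
      if (secret != null
          && candidateSignature.equals(computeSignature(secret, content))) {
        return true;
      }
    }
    return false;
  }
  // Hypothetical placeholder; not a secure signature.
  private static String computeSignature(byte[] secret, String content) {
    return Integer.toHexString(
        java.util.Arrays.hashCode(secret) ^ content.hashCode());
  }
}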
| 2,377 | 36.15625 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.authentication.util.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.security.Principal;
import java.text.SimpleDateFormat;
import java.util.*;
/**
* <p>The {@link AuthenticationFilter} enables protecting web application
* resources with different (pluggable)
* authentication mechanisms and signer secret providers.
* </p>
* <p>
* Out of the box it provides 2 authentication mechanisms: Pseudo and Kerberos SPNEGO.
* </p>
* Additional authentication mechanisms are supported via the {@link AuthenticationHandler} interface.
* <p>
* This filter delegates to the configured authentication handler for authentication and once it obtains an
* {@link AuthenticationToken} from it, sets a signed HTTP cookie with the token. For client requests
* that provide the signed HTTP cookie, it verifies the validity of the cookie, extracts the user information
* and lets the request proceed to the target resource.
* </p>
* The supported configuration properties are:
* <ul>
* <li>config.prefix: indicates the prefix to be used by all other configuration properties, the default value
* is no prefix. See below for details on how/why this prefix is used.</li>
* <li>[#PREFIX#.]type: simple|kerberos|#CLASS#, 'simple' is short for the
* {@link PseudoAuthenticationHandler}, 'kerberos' is short for {@link KerberosAuthenticationHandler}, otherwise
* the full class name of the {@link AuthenticationHandler} must be specified.</li>
* <li>[#PREFIX#.]signature.secret: when signer.secret.provider is set to
* "string" or not specified, this is the value for the secret used to sign the
* HTTP cookie.</li>
* <li>[#PREFIX#.]token.validity: time -in seconds- that the generated token is
* valid before a new authentication is triggered, default value is
 * <code>36000</code> seconds (10 hours). This is also used for the rollover interval for
* the "random" and "zookeeper" SignerSecretProviders.</li>
* <li>[#PREFIX#.]cookie.domain: domain to use for the HTTP cookie that stores the authentication token.</li>
* <li>[#PREFIX#.]cookie.path: path to use for the HTTP cookie that stores the authentication token.</li>
* </ul>
* <p>
* The rest of the configuration properties are specific to the {@link AuthenticationHandler} implementation and the
 * {@link AuthenticationFilter} will take all the properties that start with the prefix #PREFIX#, remove
 * the prefix from them and pass them to the authentication handler for initialization. Properties that do
* not start with the prefix will not be passed to the authentication handler initialization.
* </p>
* <p>
* Out of the box it provides 3 signer secret provider implementations:
* "string", "random", and "zookeeper"
* </p>
* Additional signer secret providers are supported via the
* {@link SignerSecretProvider} class.
* <p>
* For the HTTP cookies mentioned above, the SignerSecretProvider is used to
* determine the secret to use for signing the cookies. Different
* implementations can have different behaviors. The "string" implementation
* simply uses the string set in the [#PREFIX#.]signature.secret property
* mentioned above. The "random" implementation uses a randomly generated
* secret that rolls over at the interval specified by the
* [#PREFIX#.]token.validity mentioned above. The "zookeeper" implementation
* is like the "random" one, except that it synchronizes the random secret
* and rollovers between multiple servers; it's meant for HA services.
* </p>
* The relevant configuration properties are:
* <ul>
* <li>signer.secret.provider: indicates the name of the SignerSecretProvider
* class to use. Possible values are: "string", "random", "zookeeper", or a
* classname. If not specified, the "string" implementation will be used with
* [#PREFIX#.]signature.secret; and if that's not specified, the "random"
* implementation will be used.</li>
* <li>[#PREFIX#.]signature.secret: When the "string" implementation is
* specified, this value is used as the secret.</li>
* <li>[#PREFIX#.]token.validity: When the "random" or "zookeeper"
* implementations are specified, this value is used as the rollover
* interval.</li>
* </ul>
* <p>
* The "zookeeper" implementation has additional configuration properties that
* must be specified; see {@link ZKSignerSecretProvider} for details.
* </p>
* For subclasses of AuthenticationFilter that want additional control over the
* SignerSecretProvider, they can use the following attribute set in the
* ServletContext:
* <ul>
* <li>signer.secret.provider.object: A SignerSecretProvider implementation can
* be passed here that will be used instead of the signer.secret.provider
* configuration property. Note that the class should already be
* initialized.</li>
* </ul>
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class AuthenticationFilter implements Filter {
private static Logger LOG = LoggerFactory.getLogger(AuthenticationFilter.class);
/**
* Constant for the property that specifies the configuration prefix.
*/
public static final String CONFIG_PREFIX = "config.prefix";
/**
* Constant for the property that specifies the authentication handler to use.
*/
public static final String AUTH_TYPE = "type";
/**
* Constant for the property that specifies the secret to use for signing the HTTP Cookies.
*/
public static final String SIGNATURE_SECRET = "signature.secret";
public static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + ".file";
/**
* Constant for the configuration property that indicates the validity of the generated token.
*/
public static final String AUTH_TOKEN_VALIDITY = "token.validity";
/**
* Constant for the configuration property that indicates the domain to use in the HTTP cookie.
*/
public static final String COOKIE_DOMAIN = "cookie.domain";
/**
* Constant for the configuration property that indicates the path to use in the HTTP cookie.
*/
public static final String COOKIE_PATH = "cookie.path";
/**
* Constant for the configuration property
* that indicates the persistence of the HTTP cookie.
*/
public static final String COOKIE_PERSISTENT = "cookie.persistent";
/**
* Constant for the configuration property that indicates the name of the
* SignerSecretProvider class to use.
* Possible values are: "string", "random", "zookeeper", or a classname.
* If not specified, the "string" implementation will be used with
* SIGNATURE_SECRET; and if that's not specified, the "random" implementation
* will be used.
*/
public static final String SIGNER_SECRET_PROVIDER =
"signer.secret.provider";
/**
* Constant for the ServletContext attribute that can be used for providing a
* custom implementation of the SignerSecretProvider. Note that the class
* should already be initialized. If not specified, SIGNER_SECRET_PROVIDER
* will be used.
*/
public static final String SIGNER_SECRET_PROVIDER_ATTRIBUTE =
"signer.secret.provider.object";
private Properties config;
private Signer signer;
private SignerSecretProvider secretProvider;
private AuthenticationHandler authHandler;
private long validity;
private String cookieDomain;
private String cookiePath;
private boolean isCookiePersistent;
private boolean isInitializedByTomcat;
/**
* <p>Initializes the authentication filter and signer secret provider.</p>
* It instantiates and initializes the specified {@link
* AuthenticationHandler}.
*
* @param filterConfig filter configuration.
*
* @throws ServletException thrown if the filter or the authentication handler could not be initialized properly.
*/
@Override
public void init(FilterConfig filterConfig) throws ServletException {
String configPrefix = filterConfig.getInitParameter(CONFIG_PREFIX);
configPrefix = (configPrefix != null) ? configPrefix + "." : "";
config = getConfiguration(configPrefix, filterConfig);
String authHandlerName = config.getProperty(AUTH_TYPE, null);
String authHandlerClassName;
if (authHandlerName == null) {
throw new ServletException("Authentication type must be specified: " +
PseudoAuthenticationHandler.TYPE + "|" +
KerberosAuthenticationHandler.TYPE + "|<class>");
}
if (authHandlerName.toLowerCase(Locale.ENGLISH).equals(
PseudoAuthenticationHandler.TYPE)) {
authHandlerClassName = PseudoAuthenticationHandler.class.getName();
} else if (authHandlerName.toLowerCase(Locale.ENGLISH).equals(
KerberosAuthenticationHandler.TYPE)) {
authHandlerClassName = KerberosAuthenticationHandler.class.getName();
} else {
authHandlerClassName = authHandlerName;
}
validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY, "36000"))
* 1000; //10 hours
initializeSecretProvider(filterConfig);
initializeAuthHandler(authHandlerClassName, filterConfig);
cookieDomain = config.getProperty(COOKIE_DOMAIN, null);
cookiePath = config.getProperty(COOKIE_PATH, null);
isCookiePersistent = Boolean.parseBoolean(
config.getProperty(COOKIE_PERSISTENT, "false"));
}
protected void initializeAuthHandler(String authHandlerClassName, FilterConfig filterConfig)
throws ServletException {
try {
Class<?> klass = Thread.currentThread().getContextClassLoader().loadClass(authHandlerClassName);
authHandler = (AuthenticationHandler) klass.newInstance();
authHandler.init(config);
} catch (ClassNotFoundException | InstantiationException |
IllegalAccessException ex) {
throw new ServletException(ex);
}
}
protected void initializeSecretProvider(FilterConfig filterConfig)
throws ServletException {
secretProvider = (SignerSecretProvider) filterConfig.getServletContext().
getAttribute(SIGNER_SECRET_PROVIDER_ATTRIBUTE);
if (secretProvider == null) {
      // Tomcat cannot specify the provider object in the configuration,
      // so execution falls through to this path and constructs one here.
try {
secretProvider = constructSecretProvider(
filterConfig.getServletContext(),
config, false);
isInitializedByTomcat = true;
} catch (Exception ex) {
throw new ServletException(ex);
}
}
signer = new Signer(secretProvider);
}
public static SignerSecretProvider constructSecretProvider(
ServletContext ctx, Properties config,
boolean disallowFallbackToRandomSecretProvider) throws Exception {
String name = config.getProperty(SIGNER_SECRET_PROVIDER, "file");
long validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY,
"36000")) * 1000;
if (!disallowFallbackToRandomSecretProvider
&& "file".equals(name)
&& config.getProperty(SIGNATURE_SECRET_FILE) == null) {
name = "random";
}
SignerSecretProvider provider;
if ("file".equals(name)) {
provider = new FileSignerSecretProvider();
try {
provider.init(config, ctx, validity);
} catch (Exception e) {
if (!disallowFallbackToRandomSecretProvider) {
LOG.info("Unable to initialize FileSignerSecretProvider, " +
"falling back to use random secrets.");
provider = new RandomSignerSecretProvider();
provider.init(config, ctx, validity);
} else {
throw e;
}
}
} else if ("random".equals(name)) {
provider = new RandomSignerSecretProvider();
provider.init(config, ctx, validity);
} else if ("zookeeper".equals(name)) {
provider = new ZKSignerSecretProvider();
provider.init(config, ctx, validity);
} else {
provider = (SignerSecretProvider) Thread.currentThread().
getContextClassLoader().loadClass(name).newInstance();
provider.init(config, ctx, validity);
}
return provider;
}
/**
* Returns the configuration properties of the {@link AuthenticationFilter}
   * without the prefix. The returned properties are the same as those returned by the
   * {@link #getConfiguration(String, FilterConfig)} method.
*
* @return the configuration properties.
*/
protected Properties getConfiguration() {
return config;
}
/**
* Returns the authentication handler being used.
*
* @return the authentication handler being used.
*/
protected AuthenticationHandler getAuthenticationHandler() {
return authHandler;
}
/**
* Returns if a random secret is being used.
*
* @return if a random secret is being used.
*/
protected boolean isRandomSecret() {
return secretProvider.getClass() == RandomSignerSecretProvider.class;
}
/**
* Returns if a custom implementation of a SignerSecretProvider is being used.
*
* @return if a custom implementation of a SignerSecretProvider is being used.
*/
protected boolean isCustomSignerSecretProvider() {
Class<?> clazz = secretProvider.getClass();
return clazz != FileSignerSecretProvider.class && clazz !=
RandomSignerSecretProvider.class && clazz != ZKSignerSecretProvider
.class;
}
/**
* Returns the validity time of the generated tokens.
*
* @return the validity time of the generated tokens, in seconds.
*/
protected long getValidity() {
return validity / 1000;
}
/**
* Returns the cookie domain to use for the HTTP cookie.
*
* @return the cookie domain to use for the HTTP cookie.
*/
protected String getCookieDomain() {
return cookieDomain;
}
/**
* Returns the cookie path to use for the HTTP cookie.
*
* @return the cookie path to use for the HTTP cookie.
*/
protected String getCookiePath() {
return cookiePath;
}
/**
* Returns the cookie persistence to use for the HTTP cookie.
*
* @return the cookie persistence to use for the HTTP cookie.
*/
protected boolean isCookiePersistent() {
return isCookiePersistent;
}
/**
* Destroys the filter.
* <p>
* It invokes the {@link AuthenticationHandler#destroy()} method to release any resources it may hold.
*/
@Override
public void destroy() {
if (authHandler != null) {
authHandler.destroy();
authHandler = null;
}
if (secretProvider != null && isInitializedByTomcat) {
secretProvider.destroy();
secretProvider = null;
}
}
/**
* Returns the filtered configuration (only properties starting with the specified prefix). The property keys
   * are also stripped of the prefix. The returned {@link Properties} object is used to initialize the
* {@link AuthenticationHandler}.
* <p>
   * This method can be overridden by subclasses to obtain the configuration from a source other than
* the web.xml file.
*
* @param configPrefix configuration prefix to use for extracting configuration properties.
* @param filterConfig filter configuration object
*
* @return the configuration to be used with the {@link AuthenticationHandler} instance.
*
* @throws ServletException thrown if the configuration could not be created.
*/
protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) throws ServletException {
Properties props = new Properties();
Enumeration<?> names = filterConfig.getInitParameterNames();
while (names.hasMoreElements()) {
String name = (String) names.nextElement();
if (name.startsWith(configPrefix)) {
String value = filterConfig.getInitParameter(name);
props.put(name.substring(configPrefix.length()), value);
}
}
return props;
}
/**
* Returns the full URL of the request including the query string.
* <p>
* Used as a convenience method for logging purposes.
*
* @param request the request object.
*
* @return the full URL of the request including the query string.
*/
protected String getRequestURL(HttpServletRequest request) {
StringBuffer sb = request.getRequestURL();
if (request.getQueryString() != null) {
sb.append("?").append(request.getQueryString());
}
return sb.toString();
}
/**
* Returns the {@link AuthenticationToken} for the request.
* <p>
* It looks at the received HTTP cookies and extracts the value of the {@link AuthenticatedURL#AUTH_COOKIE}
* if present. It verifies the signature and if correct it creates the {@link AuthenticationToken} and returns
* it.
* <p>
* If this method returns <code>null</code> the filter will invoke the configured {@link AuthenticationHandler}
* to perform user authentication.
*
* @param request request object.
*
* @return the Authentication token if the request is authenticated, <code>null</code> otherwise.
*
* @throws IOException thrown if an IO error occurred.
* @throws AuthenticationException thrown if the token is invalid or if it has expired.
*/
protected AuthenticationToken getToken(HttpServletRequest request) throws IOException, AuthenticationException {
AuthenticationToken token = null;
String tokenStr = null;
Cookie[] cookies = request.getCookies();
if (cookies != null) {
for (Cookie cookie : cookies) {
if (cookie.getName().equals(AuthenticatedURL.AUTH_COOKIE)) {
tokenStr = cookie.getValue();
try {
tokenStr = signer.verifyAndExtract(tokenStr);
} catch (SignerException ex) {
throw new AuthenticationException(ex);
}
break;
}
}
}
if (tokenStr != null) {
token = AuthenticationToken.parse(tokenStr);
if (!token.getType().equals(authHandler.getType())) {
throw new AuthenticationException("Invalid AuthenticationToken type");
}
if (token.isExpired()) {
throw new AuthenticationException("AuthenticationToken expired");
}
}
return token;
}
/**
* If the request has a valid authentication token it allows the request to continue to the target resource,
* otherwise it triggers an authentication sequence using the configured {@link AuthenticationHandler}.
*
* @param request the request object.
* @param response the response object.
* @param filterChain the filter chain object.
*
* @throws IOException thrown if an IO error occurred.
* @throws ServletException thrown if a processing error occurred.
*/
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain)
throws IOException, ServletException {
boolean unauthorizedResponse = true;
int errCode = HttpServletResponse.SC_UNAUTHORIZED;
AuthenticationException authenticationEx = null;
HttpServletRequest httpRequest = (HttpServletRequest) request;
HttpServletResponse httpResponse = (HttpServletResponse) response;
boolean isHttps = "https".equals(httpRequest.getScheme());
try {
boolean newToken = false;
AuthenticationToken token;
try {
token = getToken(httpRequest);
}
catch (AuthenticationException ex) {
LOG.warn("AuthenticationToken ignored: " + ex.getMessage());
// will be sent back in a 401 unless filter authenticates
authenticationEx = ex;
token = null;
}
if (authHandler.managementOperation(token, httpRequest, httpResponse)) {
if (token == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Request [{}] triggering authentication", getRequestURL(httpRequest));
}
token = authHandler.authenticate(httpRequest, httpResponse);
if (token != null && token.getExpires() != 0 &&
token != AuthenticationToken.ANONYMOUS) {
token.setExpires(System.currentTimeMillis() + getValidity() * 1000);
}
newToken = true;
}
if (token != null) {
unauthorizedResponse = false;
if (LOG.isDebugEnabled()) {
LOG.debug("Request [{}] user [{}] authenticated", getRequestURL(httpRequest), token.getUserName());
}
final AuthenticationToken authToken = token;
httpRequest = new HttpServletRequestWrapper(httpRequest) {
@Override
public String getAuthType() {
return authToken.getType();
}
@Override
public String getRemoteUser() {
return authToken.getUserName();
}
@Override
public Principal getUserPrincipal() {
return (authToken != AuthenticationToken.ANONYMOUS) ? authToken : null;
}
};
if (newToken && !token.isExpired() && token != AuthenticationToken.ANONYMOUS) {
String signedToken = signer.sign(token.toString());
createAuthCookie(httpResponse, signedToken, getCookieDomain(),
getCookiePath(), token.getExpires(),
isCookiePersistent(), isHttps);
}
doFilter(filterChain, httpRequest, httpResponse);
}
} else {
unauthorizedResponse = false;
}
} catch (AuthenticationException ex) {
// exception from the filter itself is fatal
errCode = HttpServletResponse.SC_FORBIDDEN;
authenticationEx = ex;
if (LOG.isDebugEnabled()) {
LOG.debug("Authentication exception: " + ex.getMessage(), ex);
} else {
LOG.warn("Authentication exception: " + ex.getMessage());
}
}
if (unauthorizedResponse) {
if (!httpResponse.isCommitted()) {
createAuthCookie(httpResponse, "", getCookieDomain(),
getCookiePath(), 0, isCookiePersistent(), isHttps);
        // If the response code is 401, then the WWW-Authenticate header
        // should be present; reset to 403 if it is not found.
if ((errCode == HttpServletResponse.SC_UNAUTHORIZED)
&& (!httpResponse.containsHeader(
KerberosAuthenticator.WWW_AUTHENTICATE))) {
errCode = HttpServletResponse.SC_FORBIDDEN;
}
if (authenticationEx == null) {
httpResponse.sendError(errCode, "Authentication required");
} else {
httpResponse.sendError(errCode, authenticationEx.getMessage());
}
}
}
}
/**
   * Delegates the call to the servlet filter chain. Subclasses may override this
   * method to perform pre- and post-processing tasks.
*
* @param filterChain the filter chain object.
* @param request the request object.
* @param response the response object.
*
* @throws IOException thrown if an IO error occurred.
* @throws ServletException thrown if a processing error occurred.
*/
protected void doFilter(FilterChain filterChain, HttpServletRequest request,
HttpServletResponse response) throws IOException, ServletException {
filterChain.doFilter(request, response);
}
  /**
   * Creates the Hadoop authentication HTTP cookie.
   *
   * @param resp the response object.
   * @param token authentication token for the cookie.
   * @param domain the cookie domain.
   * @param path the cookie path.
   * @param expires UNIX timestamp that indicates the expiry date of the
   *                cookie. It has no effect if its value is lower than 0.
   * @param isCookiePersistent whether the cookie is persistent or not.
   * @param isSecure is the cookie secure?
   *
   * XXX the following code duplicates some logic in Jetty / Servlet API,
   * because of the fact that Hadoop is stuck at servlet 2.5 and jetty 6
   * right now.
   */
public static void createAuthCookie(HttpServletResponse resp, String token,
String domain, String path, long expires,
boolean isCookiePersistent,
boolean isSecure) {
StringBuilder sb = new StringBuilder(AuthenticatedURL.AUTH_COOKIE)
.append("=");
if (token != null && token.length() > 0) {
sb.append("\"").append(token).append("\"");
}
if (path != null) {
sb.append("; Path=").append(path);
}
if (domain != null) {
sb.append("; Domain=").append(domain);
}
if (expires >= 0 && isCookiePersistent) {
Date date = new Date(expires);
SimpleDateFormat df = new SimpleDateFormat("EEE, " +
"dd-MMM-yyyy HH:mm:ss zzz");
df.setTimeZone(TimeZone.getTimeZone("GMT"));
sb.append("; Expires=").append(df.format(date));
}
if (isSecure) {
sb.append("; Secure");
}
sb.append("; HttpOnly");
resp.addHeader("Set-Cookie", sb.toString());
}
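  // For illustration only (not part of the original source; values are made
  // up): a call such as
  //   createAuthCookie(resp, signedToken, null, "/", expiresMillis, true, true);
  // adds a Set-Cookie header of roughly this shape, with the cookie named by
  // AuthenticatedURL.AUTH_COOKIE:
  //   Set-Cookie: hadoop.auth="<signed-token>"; Path=/; Expires=<GMT date>;
  //       Secure; HttpOnly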
}
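// Illustrative sketch (not part of the original source): a subclass that
// supplies its configuration from code instead of web.xml init parameters, as
// the overridable getConfiguration(String, FilterConfig) allows. The property
// values below are placeholders.
class ExampleHardCodedAuthenticationFilter extends AuthenticationFilter {
  @Override
  protected Properties getConfiguration(String configPrefix,
      FilterConfig filterConfig) {
    Properties props = new Properties();
    props.setProperty(AUTH_TYPE, "simple");          // PseudoAuthenticationHandler
    props.setProperty(AUTH_TOKEN_VALIDITY, "3600");  // one hour
    props.setProperty(COOKIE_PATH, "/");
    return props;
  }
}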
| 26,275 | 37.812408 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import java.io.IOException;
import javax.servlet.http.Cookie;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Properties;
import java.text.ParseException;
import java.io.ByteArrayInputStream;
import java.io.UnsupportedEncodingException;
import java.security.PublicKey;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.security.cert.CertificateException;
import java.security.interfaces.RSAPublicKey;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AltKerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.util.CertificateUtil;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.nimbusds.jwt.SignedJWT;
import com.nimbusds.jose.JOSEException;
import com.nimbusds.jose.JWSObject;
import com.nimbusds.jose.JWSVerifier;
import com.nimbusds.jose.crypto.RSASSAVerifier;
/**
* The {@link JWTRedirectAuthenticationHandler} extends
* AltKerberosAuthenticationHandler to add WebSSO behavior for UIs. The expected
* SSO token is a JsonWebToken (JWT). The supported algorithm is RS256 which
* uses PKI between the token issuer and consumer. The flow requires a redirect
* to a configured authentication server URL and a subsequent request with the
* expected JWT token. This token is cryptographically verified and validated.
* The user identity is then extracted from the token and used to create an
* AuthenticationToken - as expected by the AuthenticationFilter.
*
* <p>
* The supported configuration properties are:
* </p>
* <ul>
* <li>authentication.provider.url: the full URL to the authentication server.
* This is the URL that the handler will redirect the browser to in order to
* authenticate the user. It does not have a default value.</li>
* <li>public.key.pem: This is the PEM formatted public key of the issuer of the
* JWT token. It is required for verifying that the issuer is a trusted party.
* DO NOT include the PEM header and footer portions of the PEM encoded
* certificate. It does not have a default value.</li>
* <li>expected.jwt.audiences: This is a list of strings that identify
* acceptable audiences for the JWT token. The audience is a way for the issuer
 * to indicate which entities the token is intended for. Default value is
* null which indicates that all audiences will be accepted.</li>
* <li>jwt.cookie.name: the name of the cookie that contains the JWT token.
* Default value is "hadoop-jwt".</li>
* </ul>
*/
public class JWTRedirectAuthenticationHandler extends
AltKerberosAuthenticationHandler {
private static Logger LOG = LoggerFactory
.getLogger(JWTRedirectAuthenticationHandler.class);
public static final String AUTHENTICATION_PROVIDER_URL = "authentication.provider.url";
public static final String PUBLIC_KEY_PEM = "public.key.pem";
public static final String EXPECTED_JWT_AUDIENCES = "expected.jwt.audiences";
public static final String JWT_COOKIE_NAME = "jwt.cookie.name";
private static final String ORIGINAL_URL_QUERY_PARAM = "originalUrl=";
private String authenticationProviderUrl = null;
private RSAPublicKey publicKey = null;
private List<String> audiences = null;
private String cookieName = "hadoop-jwt";
/**
* Primarily for testing, this provides a way to set the publicKey for
* signature verification without needing to get a PEM encoded value.
*
   * @param pk publicKey for the token signature verification
*/
public void setPublicKey(RSAPublicKey pk) {
publicKey = pk;
}
/**
* Initializes the authentication handler instance.
* <p>
* This method is invoked by the {@link AuthenticationFilter#init} method.
* </p>
* @param config
* configuration properties to initialize the handler.
*
* @throws ServletException
* thrown if the handler could not be initialized.
*/
@Override
public void init(Properties config) throws ServletException {
super.init(config);
// setup the URL to redirect to for authentication
authenticationProviderUrl = config
.getProperty(AUTHENTICATION_PROVIDER_URL);
if (authenticationProviderUrl == null) {
throw new ServletException(
"Authentication provider URL must not be null - configure: "
+ AUTHENTICATION_PROVIDER_URL);
}
// setup the public key of the token issuer for verification
if (publicKey == null) {
String pemPublicKey = config.getProperty(PUBLIC_KEY_PEM);
if (pemPublicKey == null) {
throw new ServletException(
"Public key for signature validation must be provisioned.");
}
publicKey = CertificateUtil.parseRSAPublicKey(pemPublicKey);
}
// setup the list of valid audiences for token validation
String auds = config.getProperty(EXPECTED_JWT_AUDIENCES);
if (auds != null) {
// parse into the list
String[] audArray = auds.split(",");
audiences = new ArrayList<String>();
for (String a : audArray) {
audiences.add(a);
}
}
// setup custom cookie name if configured
String customCookieName = config.getProperty(JWT_COOKIE_NAME);
if (customCookieName != null) {
cookieName = customCookieName;
}
}
@Override
public AuthenticationToken alternateAuthenticate(HttpServletRequest request,
HttpServletResponse response) throws IOException,
AuthenticationException {
AuthenticationToken token = null;
String serializedJWT = null;
HttpServletRequest req = (HttpServletRequest) request;
serializedJWT = getJWTFromCookie(req);
if (serializedJWT == null) {
String loginURL = constructLoginURL(request);
LOG.info("sending redirect to: " + loginURL);
((HttpServletResponse) response).sendRedirect(loginURL);
} else {
String userName = null;
SignedJWT jwtToken = null;
boolean valid = false;
try {
jwtToken = SignedJWT.parse(serializedJWT);
valid = validateToken(jwtToken);
if (valid) {
userName = jwtToken.getJWTClaimsSet().getSubject();
LOG.info("USERNAME: " + userName);
} else {
LOG.warn("jwtToken failed validation: " + jwtToken.serialize());
}
} catch(ParseException pe) {
// unable to parse the token let's try and get another one
LOG.warn("Unable to parse the JWT token", pe);
}
if (valid) {
LOG.debug("Issuing AuthenticationToken for user.");
token = new AuthenticationToken(userName, userName, getType());
} else {
String loginURL = constructLoginURL(request);
LOG.info("token validation failed - sending redirect to: " + loginURL);
((HttpServletResponse) response).sendRedirect(loginURL);
}
}
return token;
}
/**
* Encapsulate the acquisition of the JWT token from HTTP cookies within the
* request.
*
* @param req servlet request to get the JWT token from
* @return serialized JWT token
*/
protected String getJWTFromCookie(HttpServletRequest req) {
String serializedJWT = null;
Cookie[] cookies = req.getCookies();
if (cookies != null) {
for (Cookie cookie : cookies) {
if (cookieName.equals(cookie.getName())) {
LOG.info(cookieName
+ " cookie has been found and is being processed");
serializedJWT = cookie.getValue();
break;
}
}
}
return serializedJWT;
}
/**
* Create the URL to be used for authentication of the user in the absence of
* a JWT token within the incoming request.
*
* @param request for getting the original request URL
* @return url to use as login url for redirect
*/
protected String constructLoginURL(HttpServletRequest request) {
String delimiter = "?";
if (authenticationProviderUrl.contains("?")) {
delimiter = "&";
}
String loginURL = authenticationProviderUrl + delimiter
+ ORIGINAL_URL_QUERY_PARAM
+ request.getRequestURL().toString();
return loginURL;
}
/**
   * This method provides a single entry point for validating the JWT during
   * request processing. Specific aspects of the validation can be customized
   * by overriding the submethods it calls, or the entire token validation
   * algorithm can be overridden.
*
* @param jwtToken the token to validate
* @return true if valid
*/
protected boolean validateToken(SignedJWT jwtToken) {
boolean sigValid = validateSignature(jwtToken);
if (!sigValid) {
LOG.warn("Signature could not be verified");
}
boolean audValid = validateAudiences(jwtToken);
if (!audValid) {
LOG.warn("Audience validation failed.");
}
boolean expValid = validateExpiration(jwtToken);
if (!expValid) {
LOG.info("Expiration validation failed.");
}
return sigValid && audValid && expValid;
}
/**
* Verify the signature of the JWT token in this method. This method depends
* on the public key that was established during init based upon the
* provisioned public key. Override this method in subclasses in order to
* customize the signature verification behavior.
*
* @param jwtToken the token that contains the signature to be validated
* @return valid true if signature verifies successfully; false otherwise
*/
protected boolean validateSignature(SignedJWT jwtToken) {
boolean valid = false;
if (JWSObject.State.SIGNED == jwtToken.getState()) {
LOG.debug("JWT token is in a SIGNED state");
if (jwtToken.getSignature() != null) {
LOG.debug("JWT token signature is not null");
try {
JWSVerifier verifier = new RSASSAVerifier(publicKey);
if (jwtToken.verify(verifier)) {
valid = true;
LOG.debug("JWT token has been successfully verified");
} else {
LOG.warn("JWT signature verification failed.");
}
} catch (JOSEException je) {
LOG.warn("Error while validating signature", je);
}
}
}
return valid;
}
/**
* Validate whether any of the accepted audience claims is present in the
* issued token claims list for audience. Override this method in subclasses
* in order to customize the audience validation behavior.
*
* @param jwtToken
* the JWT token where the allowed audiences will be found
* @return true if an expected audience is present, otherwise false
*/
protected boolean validateAudiences(SignedJWT jwtToken) {
boolean valid = false;
try {
List<String> tokenAudienceList = jwtToken.getJWTClaimsSet()
.getAudience();
// if there were no expected audiences configured then just
// consider any audience acceptable
if (audiences == null) {
valid = true;
} else {
// if any of the configured audiences is found then consider it
// acceptable
boolean found = false;
for (String aud : tokenAudienceList) {
if (audiences.contains(aud)) {
LOG.debug("JWT token audience has been successfully validated");
valid = true;
break;
}
}
if (!valid) {
LOG.warn("JWT audience validation failed.");
}
}
} catch (ParseException pe) {
LOG.warn("Unable to parse the JWT token.", pe);
}
return valid;
}
/**
* Validate that the expiration time of the JWT token has not been violated.
   * If it has, the token is considered invalid. Override this method in
* subclasses in order to customize the expiration validation behavior.
*
* @param jwtToken the token that contains the expiration date to validate
* @return valid true if the token has not expired; false otherwise
*/
protected boolean validateExpiration(SignedJWT jwtToken) {
boolean valid = false;
try {
Date expires = jwtToken.getJWTClaimsSet().getExpirationTime();
if (expires != null && new Date().before(expires)) {
LOG.debug("JWT token expiration date has been "
+ "successfully validated");
valid = true;
} else {
LOG.warn("JWT expiration date validation failed.");
}
} catch (ParseException pe) {
LOG.warn("JWT expiration date validation failed.", pe);
}
return valid;
}
}
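// Illustrative sketch (not part of the original source): the handler-specific
// properties described in the class javadoc, expressed as the Properties that
// the AuthenticationFilter would pass to init(). All values are placeholders,
// and the Kerberos properties required by the AltKerberos base handler are
// omitted here.
class JWTRedirectConfigSketch {
  static Properties exampleConfig(String pemPublicKeyBody) {
    Properties props = new Properties();
    props.setProperty(
        JWTRedirectAuthenticationHandler.AUTHENTICATION_PROVIDER_URL,
        "https://sso.example.com/websso");
    // PEM body only; the class javadoc says not to include the header/footer.
    props.setProperty(JWTRedirectAuthenticationHandler.PUBLIC_KEY_PEM,
        pemPublicKeyBody);
    props.setProperty(
        JWTRedirectAuthenticationHandler.EXPECTED_JWT_AUDIENCES,
        "hadoop-ui,hadoop-api");
    props.setProperty(JWTRedirectAuthenticationHandler.JWT_COOKIE_NAME,
        "hadoop-jwt");
    return props;
  }
}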
| 13,507 | 36.418283 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.util.AuthToken;
import java.security.Principal;
import javax.servlet.http.HttpServletRequest;
/**
* The {@link AuthenticationToken} contains information about an authenticated
* HTTP client and doubles as the {@link Principal} to be returned by
* authenticated {@link HttpServletRequest}s
* <p>
* The token can be serialized/deserialized to and from a string as it is sent
* and received in HTTP client responses and requests as a HTTP cookie (this is
* done by the {@link AuthenticationFilter}).
*/
public class AuthenticationToken extends AuthToken {
/**
* Constant that identifies an anonymous request.
*/
public static final AuthenticationToken ANONYMOUS = new AuthenticationToken();
private AuthenticationToken() {
super();
}
private AuthenticationToken(AuthToken token) {
super(token.getUserName(), token.getName(), token.getType());
setExpires(token.getExpires());
}
/**
* Creates an authentication token.
*
* @param userName user name.
   * @param principal principal (commonly matches the user name; with Kerberos it is the full/long
   *                  principal name while the userName is the short name).
   * @param type the authentication mechanism name.
*/
public AuthenticationToken(String userName, String principal, String type) {
super(userName, principal, type);
}
/**
* Sets the expiration of the token.
*
* @param expires expiration time of the token in milliseconds since the epoch.
*/
public void setExpires(long expires) {
if (this != AuthenticationToken.ANONYMOUS) {
super.setExpires(expires);
}
}
/**
* Returns true if the token has expired.
*
* @return true if the token has expired.
*/
public boolean isExpired() {
return super.isExpired();
}
/**
* Parses a string into an authentication token.
*
* @param tokenStr string representation of a token.
*
* @return the parsed authentication token.
*
* @throws AuthenticationException thrown if the string representation could not be parsed into
* an authentication token.
*/
public static AuthenticationToken parse(String tokenStr) throws AuthenticationException {
return new AuthenticationToken(AuthToken.parse(tokenStr));
}
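
  /**
   * Illustrative sketch only, not part of the original class: demonstrates the
   * serialize/parse round trip that happens when the token travels as an HTTP
   * cookie value. The user, principal and expiry values below are arbitrary
   * example data.
   */
  static AuthenticationToken roundTripExample() throws AuthenticationException {
    AuthenticationToken token =
        new AuthenticationToken("alice", "alice@EXAMPLE.COM", "kerberos");
    token.setExpires(System.currentTimeMillis() + 60 * 60 * 1000);
    // toString() yields the string form used as the cookie value and parse()
    // rebuilds an equivalent token from it.
    String cookieValue = token.toString();
    return AuthenticationToken.parse(cookieValue);
  }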
}
| 3,092 | 31.557895 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.Properties;
/**
* Interface for server authentication mechanisms.
* The {@link AuthenticationFilter} manages the lifecycle of the authentication handler.
* Implementations must be thread-safe as one instance is initialized and used for all requests.
*/
public interface AuthenticationHandler {
public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
/**
* Returns the authentication type of the authentication handler.
* This should be a name that uniquely identifies the authentication type.
* For example 'simple' or 'kerberos'.
*
* @return the authentication type of the authentication handler.
*/
public String getType();
/**
* Initializes the authentication handler instance.
* <p>
* This method is invoked by the {@link AuthenticationFilter#init} method.
*
* @param config configuration properties to initialize the handler.
*
* @throws ServletException thrown if the handler could not be initialized.
*/
public void init(Properties config) throws ServletException;
/**
* Destroys the authentication handler instance.
* <p>
* This method is invoked by the {@link AuthenticationFilter#destroy} method.
*/
public void destroy();
/**
* Performs an authentication management operation.
* <p>
* This is useful for handling operations like get/renew/cancel
* delegation tokens which are being handled as operations of the
* service end-point.
* <p>
 * If the method returns <code>TRUE</code> the request will continue normal
 * processing; this means the method has not produced any HTTP response.
 * <p>
 * If the method returns <code>FALSE</code> the request will end; this means
 * the method has produced the corresponding HTTP response.
*
* @param token the authentication token if any, otherwise <code>NULL</code>.
* @param request the HTTP client request.
* @param response the HTTP client response.
* @return <code>TRUE</code> if the request should be processed as a regular
* request,
* <code>FALSE</code> otherwise.
*
* @throws IOException thrown if an IO error occurred.
* @throws AuthenticationException thrown if an Authentication error occurred.
*/
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException;
/**
* Performs an authentication step for the given HTTP client request.
* <p>
* This method is invoked by the {@link AuthenticationFilter} only if the HTTP client request is
* not yet authenticated.
* <p>
* Depending upon the authentication mechanism being implemented, a particular HTTP client may
* end up making a sequence of invocations before authentication is successfully established (this is
* the case of Kerberos SPNEGO).
* <p>
 * This method must return an {@link AuthenticationToken} only if the HTTP client request has
* been successfully and fully authenticated.
* <p>
* If the HTTP client request has not been completely authenticated, this method must take over
* the corresponding HTTP response and it must return <code>null</code>.
*
* @param request the HTTP client request.
* @param response the HTTP client response.
*
* @return an {@link AuthenticationToken} if the HTTP client request has been authenticated,
* <code>null</code> otherwise (in this case it must take care of the response).
*
* @throws IOException thrown if an IO error occurred.
* @throws AuthenticationException thrown if an Authentication error occurred.
*/
public AuthenticationToken authenticate(HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException;
}
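
/**
 * Illustrative sketch only, not part of the original source: a minimal,
 * hypothetical handler that authenticates every request as the fixed user
 * "guest". It exists purely to show the lifecycle and contract described by
 * the interface above; real handlers perform an actual authentication step.
 */
class FixedUserAuthenticationHandlerExample implements AuthenticationHandler {

  @Override
  public String getType() {
    return "fixed-user-example";
  }

  @Override
  public void init(Properties config) throws ServletException {
    // This example needs no configuration.
  }

  @Override
  public void destroy() {
  }

  @Override
  public boolean managementOperation(AuthenticationToken token,
                                     HttpServletRequest request,
                                     HttpServletResponse response)
      throws IOException, AuthenticationException {
    // No management operations: let every request continue normal processing.
    return true;
  }

  @Override
  public AuthenticationToken authenticate(HttpServletRequest request,
                                          HttpServletResponse response)
      throws IOException, AuthenticationException {
    // Every request is authenticated as the same hypothetical user.
    return new AuthenticationToken("guest", "guest", getType());
  }
}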
| 4,759 | 39.338983 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
import org.apache.http.client.utils.URLEncodedUtils;
import org.apache.http.NameValuePair;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.List;
import java.util.Properties;
/**
* The <code>PseudoAuthenticationHandler</code> provides a pseudo authentication mechanism that accepts
* the user name specified as a query string parameter.
* <p>
 * This mimics the model of Hadoop Simple authentication which trusts the 'user.name' property provided in
* the configuration object.
* <p>
* This handler can be configured to support anonymous users.
* <p>
* The only supported configuration property is:
* <ul>
* <li>simple.anonymous.allowed: <code>true|false</code>, default value is <code>false</code></li>
* </ul>
*/
public class PseudoAuthenticationHandler implements AuthenticationHandler {
/**
* Constant that identifies the authentication mechanism.
*/
public static final String TYPE = "simple";
/**
* Constant for the configuration property that indicates if anonymous users are allowed.
*/
public static final String ANONYMOUS_ALLOWED = TYPE + ".anonymous.allowed";
private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
private static final String PSEUDO_AUTH = "PseudoAuth";
private boolean acceptAnonymous;
private String type;
/**
* Creates a Hadoop pseudo authentication handler with the default auth-token
* type, <code>simple</code>.
*/
public PseudoAuthenticationHandler() {
this(TYPE);
}
/**
* Creates a Hadoop pseudo authentication handler with a custom auth-token
* type.
*
* @param type auth-token type.
*/
public PseudoAuthenticationHandler(String type) {
this.type = type;
}
/**
* Initializes the authentication handler instance.
* <p>
* This method is invoked by the {@link AuthenticationFilter#init} method.
*
* @param config configuration properties to initialize the handler.
*
* @throws ServletException thrown if the handler could not be initialized.
*/
@Override
public void init(Properties config) throws ServletException {
acceptAnonymous = Boolean.parseBoolean(config.getProperty(ANONYMOUS_ALLOWED, "false"));
}
/**
   * Returns whether the handler is configured to support anonymous users.
   *
   * @return true if the handler is configured to support anonymous users.
*/
protected boolean getAcceptAnonymous() {
return acceptAnonymous;
}
/**
* Releases any resources initialized by the authentication handler.
* <p>
* This implementation does a NOP.
*/
@Override
public void destroy() {
}
/**
* Returns the authentication type of the authentication handler, 'simple'.
*
* @return the authentication type of the authentication handler, 'simple'.
*/
@Override
public String getType() {
return type;
}
/**
   * This is an empty implementation; it always returns <code>TRUE</code>.
   *
* @param token the authentication token if any, otherwise <code>NULL</code>.
* @param request the HTTP client request.
* @param response the HTTP client response.
*
* @return <code>TRUE</code>
* @throws IOException it is never thrown.
* @throws AuthenticationException it is never thrown.
*/
@Override
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
return true;
}
private String getUserName(HttpServletRequest request) {
String queryString = request.getQueryString();
if(queryString == null || queryString.length() == 0) {
return null;
}
List<NameValuePair> list = URLEncodedUtils.parse(queryString, UTF8_CHARSET);
if (list != null) {
for (NameValuePair nv : list) {
if (PseudoAuthenticator.USER_NAME.equals(nv.getName())) {
return nv.getValue();
}
}
}
return null;
}
/**
* Authenticates an HTTP client request.
* <p>
* It extracts the {@link PseudoAuthenticator#USER_NAME} parameter from the query string and creates
* an {@link AuthenticationToken} with it.
* <p>
* If the HTTP client request does not contain the {@link PseudoAuthenticator#USER_NAME} parameter and
* the handler is configured to allow anonymous users it returns the {@link AuthenticationToken#ANONYMOUS}
* token.
* <p>
* If the HTTP client request does not contain the {@link PseudoAuthenticator#USER_NAME} parameter and
* the handler is configured to disallow anonymous users it throws an {@link AuthenticationException}.
*
* @param request the HTTP client request.
* @param response the HTTP client response.
*
* @return an authentication token if the HTTP client request is accepted and credentials are valid.
*
* @throws IOException thrown if an IO error occurred.
* @throws AuthenticationException thrown if HTTP client request was not accepted as an authentication request.
*/
@Override
public AuthenticationToken authenticate(HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token;
String userName = getUserName(request);
if (userName == null) {
if (getAcceptAnonymous()) {
token = AuthenticationToken.ANONYMOUS;
} else {
response.setStatus(HttpServletResponse.SC_FORBIDDEN);
response.setHeader(WWW_AUTHENTICATE, PSEUDO_AUTH);
token = null;
}
} else {
token = new AuthenticationToken(userName, userName, getType());
}
return token;
}
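
  /**
   * Illustrative sketch only, not part of the original handler: configures the
   * handler so that anonymous requests are accepted. With this setup a request
   * carrying the query parameter "user.name=alice" resolves to the user
   * "alice", while a request without the parameter yields
   * {@link AuthenticationToken#ANONYMOUS}.
   */
  static PseudoAuthenticationHandler anonymousFriendlyExample()
      throws ServletException {
    Properties config = new Properties();
    config.setProperty(ANONYMOUS_ALLOWED, "true");
    PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler();
    handler.init(config);
    return handler;
  }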
}
| 6,643 | 32.054726 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.Oid;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.IOException;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Pattern;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
/**
* The {@link KerberosAuthenticationHandler} implements the Kerberos SPNEGO authentication mechanism for HTTP.
* <p>
* The supported configuration properties are:
* <ul>
 * <li>kerberos.principal: the Kerberos principal to be used by the server. As stated by the Kerberos SPNEGO
* specification, it should be <code>HTTP/${HOSTNAME}@{REALM}</code>. The realm can be omitted from the
* principal as the JDK GSS libraries will use the realm name of the configured default realm.
* It does not have a default value.</li>
* <li>kerberos.keytab: the keytab file containing the credentials for the Kerberos principal.
* It does not have a default value.</li>
 * <li>kerberos.name.rules: Kerberos name rules to resolve principal names, see
* {@link KerberosName#setRules(String)}</li>
* </ul>
*/
public class KerberosAuthenticationHandler implements AuthenticationHandler {
private static Logger LOG = LoggerFactory.getLogger(KerberosAuthenticationHandler.class);
/**
* Kerberos context configuration for the JDK GSS library.
*/
private static class KerberosConfiguration extends Configuration {
private String keytab;
private String principal;
public KerberosConfiguration(String keytab, String principal) {
this.keytab = keytab;
this.principal = principal;
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
Map<String, String> options = new HashMap<String, String>();
if (IBM_JAVA) {
options.put("useKeytab",
keytab.startsWith("file://") ? keytab : "file://" + keytab);
options.put("principal", principal);
options.put("credsType", "acceptor");
} else {
options.put("keyTab", keytab);
options.put("principal", principal);
options.put("useKeyTab", "true");
options.put("storeKey", "true");
options.put("doNotPrompt", "true");
options.put("useTicketCache", "true");
options.put("renewTGT", "true");
options.put("isInitiator", "false");
}
options.put("refreshKrb5Config", "true");
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
if (IBM_JAVA) {
options.put("useDefaultCcache", "true");
// The first value searched when "useDefaultCcache" is used.
System.setProperty("KRB5CCNAME", ticketCache);
options.put("renewTGT", "true");
options.put("credsType", "both");
} else {
options.put("ticketCache", ticketCache);
}
}
if (LOG.isDebugEnabled()) {
options.put("debug", "true");
}
return new AppConfigurationEntry[]{
new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options),};
}
}
/**
* Constant that identifies the authentication mechanism.
*/
public static final String TYPE = "kerberos";
/**
* Constant for the configuration property that indicates the kerberos principal.
*/
public static final String PRINCIPAL = TYPE + ".principal";
/**
* Constant for the configuration property that indicates the keytab file path.
*/
public static final String KEYTAB = TYPE + ".keytab";
/**
* Constant for the configuration property that indicates the Kerberos name
* rules for the Kerberos principals.
*/
public static final String NAME_RULES = TYPE + ".name.rules";
private String type;
private String keytab;
private GSSManager gssManager;
private Subject serverSubject = new Subject();
private List<LoginContext> loginContexts = new ArrayList<LoginContext>();
/**
* Creates a Kerberos SPNEGO authentication handler with the default
* auth-token type, <code>kerberos</code>.
*/
public KerberosAuthenticationHandler() {
this(TYPE);
}
/**
* Creates a Kerberos SPNEGO authentication handler with a custom auth-token
* type.
*
* @param type auth-token type.
*/
public KerberosAuthenticationHandler(String type) {
this.type = type;
}
/**
* Initializes the authentication handler instance.
* <p>
* It creates a Kerberos context using the principal and keytab specified in the configuration.
* <p>
* This method is invoked by the {@link AuthenticationFilter#init} method.
*
* @param config configuration properties to initialize the handler.
*
* @throws ServletException thrown if the handler could not be initialized.
*/
@Override
public void init(Properties config) throws ServletException {
try {
String principal = config.getProperty(PRINCIPAL);
if (principal == null || principal.trim().length() == 0) {
throw new ServletException("Principal not defined in configuration");
}
keytab = config.getProperty(KEYTAB, keytab);
if (keytab == null || keytab.trim().length() == 0) {
throw new ServletException("Keytab not defined in configuration");
}
if (!new File(keytab).exists()) {
throw new ServletException("Keytab does not exist: " + keytab);
}
// use all SPNEGO principals in the keytab if a principal isn't
// specifically configured
final String[] spnegoPrincipals;
if (principal.equals("*")) {
spnegoPrincipals = KerberosUtil.getPrincipalNames(
keytab, Pattern.compile("HTTP/.*"));
if (spnegoPrincipals.length == 0) {
throw new ServletException("Principals do not exist in the keytab");
}
} else {
spnegoPrincipals = new String[]{principal};
}
String nameRules = config.getProperty(NAME_RULES, null);
if (nameRules != null) {
KerberosName.setRules(nameRules);
}
for (String spnegoPrincipal : spnegoPrincipals) {
LOG.info("Login using keytab {}, for principal {}",
keytab, spnegoPrincipal);
final KerberosConfiguration kerberosConfiguration =
new KerberosConfiguration(keytab, spnegoPrincipal);
final LoginContext loginContext =
new LoginContext("", serverSubject, null, kerberosConfiguration);
try {
loginContext.login();
} catch (LoginException le) {
LOG.warn("Failed to login as [{}]", spnegoPrincipal, le);
throw new AuthenticationException(le);
}
loginContexts.add(loginContext);
}
try {
gssManager = Subject.doAs(serverSubject, new PrivilegedExceptionAction<GSSManager>() {
@Override
public GSSManager run() throws Exception {
return GSSManager.getInstance();
}
});
} catch (PrivilegedActionException ex) {
throw ex.getException();
}
} catch (Exception ex) {
throw new ServletException(ex);
}
}
/**
* Releases any resources initialized by the authentication handler.
* <p>
* It destroys the Kerberos context.
*/
@Override
public void destroy() {
keytab = null;
serverSubject = null;
for (LoginContext loginContext : loginContexts) {
try {
loginContext.logout();
} catch (LoginException ex) {
LOG.warn(ex.getMessage(), ex);
}
}
loginContexts.clear();
}
/**
* Returns the authentication type of the authentication handler, 'kerberos'.
   *
* @return the authentication type of the authentication handler, 'kerberos'.
*/
@Override
public String getType() {
return type;
}
/**
* Returns the Kerberos principals used by the authentication handler.
*
* @return the Kerberos principals used by the authentication handler.
*/
protected Set<KerberosPrincipal> getPrincipals() {
return serverSubject.getPrincipals(KerberosPrincipal.class);
}
/**
* Returns the keytab used by the authentication handler.
*
* @return the keytab used by the authentication handler.
*/
protected String getKeytab() {
return keytab;
}
/**
   * This is an empty implementation; it always returns <code>TRUE</code>.
   *
* @param token the authentication token if any, otherwise <code>NULL</code>.
* @param request the HTTP client request.
* @param response the HTTP client response.
*
* @return <code>TRUE</code>
* @throws IOException it is never thrown.
* @throws AuthenticationException it is never thrown.
*/
@Override
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
return true;
}
/**
   * It enforces the Kerberos SPNEGO authentication sequence, returning an {@link AuthenticationToken} only
* after the Kerberos SPNEGO sequence has completed successfully.
*
* @param request the HTTP client request.
* @param response the HTTP client response.
*
* @return an authentication token if the Kerberos SPNEGO sequence is complete and valid,
* <code>null</code> if it is in progress (in this case the handler handles the response to the client).
*
* @throws IOException thrown if an IO error occurred.
* @throws AuthenticationException thrown if Kerberos SPNEGO sequence failed.
*/
@Override
public AuthenticationToken authenticate(HttpServletRequest request, final HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token = null;
String authorization = request.getHeader(KerberosAuthenticator.AUTHORIZATION);
if (authorization == null || !authorization.startsWith(KerberosAuthenticator.NEGOTIATE)) {
response.setHeader(WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
if (authorization == null) {
LOG.trace("SPNEGO starting");
} else {
LOG.warn("'" + KerberosAuthenticator.AUTHORIZATION + "' does not start with '" +
KerberosAuthenticator.NEGOTIATE + "' : {}", authorization);
}
} else {
authorization = authorization.substring(KerberosAuthenticator.NEGOTIATE.length()).trim();
final Base64 base64 = new Base64(0);
final byte[] clientToken = base64.decode(authorization);
final String serverName = request.getServerName();
try {
token = Subject.doAs(serverSubject, new PrivilegedExceptionAction<AuthenticationToken>() {
@Override
public AuthenticationToken run() throws Exception {
AuthenticationToken token = null;
GSSContext gssContext = null;
GSSCredential gssCreds = null;
try {
gssCreds = gssManager.createCredential(
gssManager.createName(
KerberosUtil.getServicePrincipal("HTTP", serverName),
KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
GSSCredential.INDEFINITE_LIFETIME,
new Oid[]{
KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID")},
GSSCredential.ACCEPT_ONLY);
gssContext = gssManager.createContext(gssCreds);
byte[] serverToken = gssContext.acceptSecContext(clientToken, 0, clientToken.length);
if (serverToken != null && serverToken.length > 0) {
String authenticate = base64.encodeToString(serverToken);
response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE,
KerberosAuthenticator.NEGOTIATE + " " + authenticate);
}
if (!gssContext.isEstablished()) {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
LOG.trace("SPNEGO in progress");
} else {
String clientPrincipal = gssContext.getSrcName().toString();
KerberosName kerberosName = new KerberosName(clientPrincipal);
String userName = kerberosName.getShortName();
token = new AuthenticationToken(userName, clientPrincipal, getType());
response.setStatus(HttpServletResponse.SC_OK);
LOG.trace("SPNEGO completed for principal [{}]", clientPrincipal);
}
} finally {
if (gssContext != null) {
gssContext.dispose();
}
if (gssCreds != null) {
gssCreds.dispose();
}
}
return token;
}
});
} catch (PrivilegedActionException ex) {
if (ex.getException() instanceof IOException) {
throw (IOException) ex.getException();
}
else {
throw new AuthenticationException(ex.getException());
}
}
}
return token;
}
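
  /**
   * Illustrative sketch only, not part of the original handler: builds the
   * minimal configuration that the init method expects. The principal, keytab
   * path and name rule below are hypothetical example values; a principal of
   * "*" would instead pick up every HTTP/* principal found in the keytab.
   */
  static Properties exampleSpnegoConfig() {
    Properties config = new Properties();
    config.setProperty(PRINCIPAL, "HTTP/host.example.com@EXAMPLE.COM");
    config.setProperty(KEYTAB, "/etc/security/keytabs/spnego.service.keytab");
    config.setProperty(NAME_RULES, "DEFAULT");
    return config;
  }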
}
| 15,029 | 36.019704 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AltKerberosAuthenticationHandler.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import java.io.IOException;
import java.util.Locale;
import java.util.Properties;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
/**
* The {@link AltKerberosAuthenticationHandler} behaves exactly the same way as
* the {@link KerberosAuthenticationHandler}, except that it allows for an
* alternative form of authentication for browsers while still using Kerberos
* for Java access. This is an abstract class that should be subclassed
* to allow a developer to implement their own custom authentication for browser
* access. The alternateAuthenticate method will be called whenever a request
* comes from a browser.
*/
public abstract class AltKerberosAuthenticationHandler
extends KerberosAuthenticationHandler {
/**
* Constant that identifies the authentication mechanism.
*/
public static final String TYPE = "alt-kerberos";
/**
* Constant for the configuration property that indicates which user agents
* are not considered browsers (comma separated)
*/
public static final String NON_BROWSER_USER_AGENTS =
TYPE + ".non-browser.user-agents";
private static final String NON_BROWSER_USER_AGENTS_DEFAULT =
"java,curl,wget,perl";
private String[] nonBrowserUserAgents;
/**
* Returns the authentication type of the authentication handler,
* 'alt-kerberos'.
*
* @return the authentication type of the authentication handler,
* 'alt-kerberos'.
*/
@Override
public String getType() {
return TYPE;
}
@Override
public void init(Properties config) throws ServletException {
super.init(config);
nonBrowserUserAgents = config.getProperty(
NON_BROWSER_USER_AGENTS, NON_BROWSER_USER_AGENTS_DEFAULT)
.split("\\W*,\\W*");
for (int i = 0; i < nonBrowserUserAgents.length; i++) {
nonBrowserUserAgents[i] =
nonBrowserUserAgents[i].toLowerCase(Locale.ENGLISH);
}
}
/**
   * It enforces the Kerberos SPNEGO authentication sequence, returning an
* {@link AuthenticationToken} only after the Kerberos SPNEGO sequence has
* completed successfully (in the case of Java access) and only after the
* custom authentication implemented by the subclass in alternateAuthenticate
* has completed successfully (in the case of browser access).
*
* @param request the HTTP client request.
* @param response the HTTP client response.
*
* @return an authentication token if the request is authorized or null
*
* @throws IOException thrown if an IO error occurred
* @throws AuthenticationException thrown if an authentication error occurred
*/
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token;
if (isBrowser(request.getHeader("User-Agent"))) {
token = alternateAuthenticate(request, response);
}
else {
token = super.authenticate(request, response);
}
return token;
}
/**
* This method parses the User-Agent String and returns whether or not it
   * refers to a browser. If it is not a browser, then Kerberos authentication
* will be used; if it is a browser, alternateAuthenticate from the subclass
* will be used.
* <p>
* A User-Agent String is considered to be a browser if it does not contain
* any of the values from alt-kerberos.non-browser.user-agents; the default
* behavior is to consider everything a browser unless it contains one of:
* "java", "curl", "wget", or "perl". Subclasses can optionally override
* this method to use different behavior.
*
* @param userAgent The User-Agent String, or null if there isn't one
* @return true if the User-Agent String refers to a browser, false if not
*/
protected boolean isBrowser(String userAgent) {
if (userAgent == null) {
return false;
}
userAgent = userAgent.toLowerCase(Locale.ENGLISH);
boolean isBrowser = true;
for (String nonBrowserUserAgent : nonBrowserUserAgents) {
if (userAgent.contains(nonBrowserUserAgent)) {
isBrowser = false;
break;
}
}
return isBrowser;
}
/**
* Subclasses should implement this method to provide the custom
* authentication to be used for browsers.
*
* @param request the HTTP client request.
* @param response the HTTP client response.
* @return an authentication token if the request is authorized, or null
* @throws IOException thrown if an IO error occurs
* @throws AuthenticationException thrown if an authentication error occurs
*/
public abstract AuthenticationToken alternateAuthenticate(
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException;
}
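
/**
 * Illustrative sketch only, not part of the original source: a hypothetical
 * subclass showing how alternateAuthenticate can be implemented. Browser
 * requests are mapped to a fixed "web-user" identity, while non-browser
 * clients still go through the Kerberos SPNEGO sequence inherited from the
 * parent class. A real subclass would implement a proper browser login such as
 * a form or SSO cookie check.
 */
class FixedBrowserUserAltKerberosHandlerExample
    extends AltKerberosAuthenticationHandler {

  @Override
  public AuthenticationToken alternateAuthenticate(HttpServletRequest request,
      HttpServletResponse response)
      throws IOException, AuthenticationException {
    // Browsers get a single example identity; the reported auth-token type is
    // the handler's own type, "alt-kerberos".
    return new AuthenticationToken("web-user", "web-user", getType());
  }
}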
| 5,652 | 36.686667 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/util/PlatformName.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A helper class for getting build-info of the java-vm.
*
*/
@InterfaceAudience.LimitedPrivate({"HBase"})
@InterfaceStability.Unstable
public class PlatformName {
/**
* The complete platform 'name' to identify the platform as
* per the java-vm.
*/
public static final String PLATFORM_NAME =
(System.getProperty("os.name").startsWith("Windows")
? System.getenv("os") : System.getProperty("os.name"))
+ "-" + System.getProperty("os.arch")
+ "-" + System.getProperty("sun.arch.data.model");
/**
* The java vendor name used in this platform.
*/
public static final String JAVA_VENDOR_NAME = System.getProperty("java.vendor");
/**
   * A public static variable indicating whether the current Java vendor is
   * IBM Java.
*/
public static final boolean IBM_JAVA = JAVA_VENDOR_NAME.contains("IBM");
public static void main(String[] args) {
System.out.println(PLATFORM_NAME);
}
}
| 1,906 | 33.053571 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/portmap/TestPortmap.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.portmap;
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.util.Map;
import org.junit.Assert;
import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.CredentialsNone;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
public class TestPortmap {
private static Portmap pm = new Portmap();
private static final int SHORT_TIMEOUT_MILLISECONDS = 10;
private static final int RETRY_TIMES = 5;
private int xid;
@BeforeClass
public static void setup() {
pm.start(SHORT_TIMEOUT_MILLISECONDS, new InetSocketAddress("localhost", 0),
new InetSocketAddress("localhost", 0));
}
@AfterClass
public static void tearDown() {
pm.shutdown();
}
@Test(timeout = 1000)
public void testIdle() throws InterruptedException, IOException {
Socket s = new Socket();
try {
s.connect(pm.getTcpServerLocalAddress());
int i = 0;
while (!s.isConnected() && i < RETRY_TIMES) {
++i;
Thread.sleep(SHORT_TIMEOUT_MILLISECONDS);
}
Assert.assertTrue("Failed to connect to the server", s.isConnected()
&& i < RETRY_TIMES);
int b = s.getInputStream().read();
Assert.assertTrue("The server failed to disconnect", b == -1);
} finally {
s.close();
}
}
@Test(timeout = 1000)
public void testRegistration() throws IOException, InterruptedException {
XDR req = new XDR();
RpcCall.getInstance(++xid, RpcProgramPortmap.PROGRAM,
RpcProgramPortmap.VERSION,
RpcProgramPortmap.PMAPPROC_SET,
new CredentialsNone(), new VerifierNone()).write(req);
PortmapMapping sent = new PortmapMapping(90000, 1,
PortmapMapping.TRANSPORT_TCP, 1234);
sent.serialize(req);
byte[] reqBuf = req.getBytes();
DatagramSocket s = new DatagramSocket();
DatagramPacket p = new DatagramPacket(reqBuf, reqBuf.length,
pm.getUdpServerLoAddress());
try {
s.send(p);
} finally {
s.close();
}
// Give the server a chance to process the request
Thread.sleep(100);
boolean found = false;
@SuppressWarnings("unchecked")
Map<String, PortmapMapping> map = (Map<String, PortmapMapping>) Whitebox
.getInternalState(pm.getHandler(), "map");
for (PortmapMapping m : map.values()) {
if (m.getPort() == sent.getPort()
&& PortmapMapping.key(m).equals(PortmapMapping.key(sent))) {
found = true;
break;
}
}
Assert.assertTrue("Registration failed", found);
}
}
| 3,633 | 30.059829 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.junit.Assert;
import org.junit.Test;
public class TestNfsExports {
private final String address1 = "192.168.0.12";
private final String address2 = "10.0.0.12";
private final String hostname1 = "a.b.com";
private final String hostname2 = "a.b.org";
private static final long ExpirationPeriod =
Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT * 1000 * 1000;
private static final int CacheSize = Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_DEFAULT;
private static final long NanosPerMillis = 1000000;
@Test
public void testWildcardRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, "* rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
}
@Test
public void testWildcardRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, "* ro");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
}
@Test
public void testExactAddressRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, address1
+ " rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertFalse(AccessPrivilege.READ_WRITE == matcher
.getAccessPrivilege(address2, hostname1));
}
@Test
public void testExactAddressRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, address1);
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testExactHostRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, hostname1
+ " rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
}
@Test
public void testExactHostRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, hostname1);
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
}
@Test
public void testCidrShortRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.0/22 rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testCidrShortRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.0/22");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testCidrLongRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.0/255.255.252.0 rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testCidrLongRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.0/255.255.252.0");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testRegexIPRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.[0-9]+ rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testRegexIPRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.[0-9]+");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.NONE,
matcher.getAccessPrivilege(address2, hostname1));
}
@Test
public void testRegexHostRW() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"[a-z]+.b.com rw");
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname1));
// address1 will hit the cache
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address1, hostname2));
}
@Test
public void testRegexHostRO() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"[a-z]+.b.com");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
// address1 will hit the cache
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname2));
}
@Test
public void testRegexGrouping() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.(12|34)");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
// address1 will hit the cache
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname2));
matcher = new NfsExports(CacheSize, ExpirationPeriod, "\\w*.a.b.com");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege("1.2.3.4", "web.a.b.com"));
// address "1.2.3.4" will hit the cache
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege("1.2.3.4", "email.a.b.org"));
}
@Test
public void testMultiMatchers() throws Exception {
long shortExpirationPeriod = 1 * 1000 * 1000 * 1000; // 1s
NfsExports matcher = new NfsExports(CacheSize, shortExpirationPeriod,
"192.168.0.[0-9]+;[a-z]+.b.com rw");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname2));
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, address1));
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address2, hostname1));
// address2 will hit the cache
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address2, hostname2));
Thread.sleep(1000);
// no cache for address2 now
AccessPrivilege ap;
long startNanos = System.nanoTime();
do {
ap = matcher.getAccessPrivilege(address2, address2);
if (ap == AccessPrivilege.NONE) {
break;
}
Thread.sleep(500);
} while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000);
Assert.assertEquals(AccessPrivilege.NONE, ap);
}
@Test(expected=IllegalArgumentException.class)
public void testInvalidHost() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"foo#bar");
}
@Test(expected=IllegalArgumentException.class)
public void testInvalidSeparator() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"foo ro : bar rw");
}
}
| 8,422 | 35.942982 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsTime.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs;
import org.junit.Assert;
import org.apache.hadoop.nfs.NfsTime;
import org.apache.hadoop.oncrpc.XDR;
import org.junit.Test;
public class TestNfsTime {
@Test
public void testConstructor() {
NfsTime nfstime = new NfsTime(1001);
Assert.assertEquals(1, nfstime.getSeconds());
Assert.assertEquals(1000000, nfstime.getNseconds());
}
@Test
public void testSerializeDeserialize() {
// Serialize NfsTime
NfsTime t1 = new NfsTime(1001);
XDR xdr = new XDR();
t1.serialize(xdr);
// Deserialize it back
NfsTime t2 = NfsTime.deserialize(xdr.asReadOnlyWrap());
// Ensure the NfsTimes are equal
Assert.assertEquals(t1, t2);
}
}
| 1,521 | 30.708333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestFileHandle.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3;
import org.junit.Assert;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
import org.junit.Test;
public class TestFileHandle {
@Test
public void testConstructor() {
FileHandle handle = new FileHandle(1024);
XDR xdr = new XDR();
handle.serialize(xdr);
Assert.assertEquals(handle.getFileId(), 1024);
// Deserialize it back
FileHandle handle2 = new FileHandle();
handle2.deserialize(xdr.asReadOnlyWrap());
Assert.assertEquals("Failed: Assert 1024 is id ", 1024,
handle.getFileId());
}
}
| 1,417 | 33.585366 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestXDR.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import org.junit.Assert;
import org.junit.Test;
public class TestXDR {
static final int WRITE_VALUE=23;
private void serializeInt(int times) {
XDR w = new XDR();
for (int i = 0; i < times; ++i)
w.writeInt(WRITE_VALUE);
XDR r = w.asReadOnlyWrap();
for (int i = 0; i < times; ++i)
Assert.assertEquals(
WRITE_VALUE,r.readInt());
}
private void serializeLong(int times) {
XDR w = new XDR();
for (int i = 0; i < times; ++i)
w.writeLongAsHyper(WRITE_VALUE);
XDR r = w.asReadOnlyWrap();
for (int i = 0; i < times; ++i)
Assert.assertEquals(WRITE_VALUE, r.readHyper());
}
@Test
public void testPerformance() {
final int TEST_TIMES = 8 << 20;
serializeInt(TEST_TIMES);
serializeLong(TEST_TIMES);
}
}
| 1,635 | 29.867925 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcCall.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.oncrpc.security.CredentialsNone;
import org.apache.hadoop.oncrpc.security.Credentials;
import org.apache.hadoop.oncrpc.security.Verifier;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.junit.Test;
/**
* Tests for {@link RpcCall}
*/
public class TestRpcCall {
@Test
public void testConstructor() {
Credentials credential = new CredentialsNone();
Verifier verifier = new VerifierNone();
int rpcVersion = RpcCall.RPC_VERSION;
int program = 2;
int version = 3;
int procedure = 4;
RpcCall call = new RpcCall(0, RpcMessage.Type.RPC_CALL, rpcVersion, program,
version, procedure, credential, verifier);
assertEquals(0, call.getXid());
assertEquals(RpcMessage.Type.RPC_CALL, call.getMessageType());
assertEquals(rpcVersion, call.getRpcVersion());
assertEquals(program, call.getProgram());
assertEquals(version, call.getVersion());
assertEquals(procedure, call.getProcedure());
assertEquals(credential, call.getCredential());
assertEquals(verifier, call.getVerifier());
}
@Test(expected=IllegalArgumentException.class)
public void testInvalidRpcVersion() {
int invalidRpcVersion = 3;
new RpcCall(0, RpcMessage.Type.RPC_CALL, invalidRpcVersion, 2, 3, 4, null, null);
}
@Test(expected=IllegalArgumentException.class)
public void testInvalidRpcMessageType() {
    RpcMessage.Type invalidMessageType = RpcMessage.Type.RPC_REPLY; // Message type is not RpcMessage.RPC_CALL
new RpcCall(0, invalidMessageType, RpcCall.RPC_VERSION, 2, 3, 4, null, null);
}
}
| 2,482 | 37.2 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.nio.ByteBuffer;
import java.util.Random;
import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder;
import org.apache.hadoop.oncrpc.security.CredentialsNone;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.apache.log4j.Level;
import org.apache.commons.logging.impl.Log4JLogger;
import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelException;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.junit.Test;
import org.mockito.Mockito;
public class TestFrameDecoder {
static {
((Log4JLogger) RpcProgram.LOG).getLogger().setLevel(Level.ALL);
}
private static int resultSize;
static void testRequest(XDR request, int serverPort) {
// Reset resultSize so as to avoid interference from other tests in this class.
resultSize = 0;
SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", serverPort, request,
true);
tcpClient.run();
}
static class TestRpcProgram extends RpcProgram {
protected TestRpcProgram(String program, String host, int port,
int progNumber, int lowProgVersion, int highProgVersion,
boolean allowInsecurePorts) {
super(program, host, port, progNumber, lowProgVersion, highProgVersion,
null, allowInsecurePorts);
}
@Override
protected void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
// This is just like what's done in RpcProgramMountd#handleInternal and
// RpcProgramNfs3#handleInternal.
RpcCall rpcCall = (RpcCall) info.header();
final int procedure = rpcCall.getProcedure();
if (procedure != 0) {
boolean portMonitorSuccess = doPortMonitoring(info.remoteAddress());
if (!portMonitorSuccess) {
sendRejectedReply(rpcCall, info.remoteAddress(), ctx);
return;
}
}
resultSize = info.data().readableBytes();
RpcAcceptedReply reply = RpcAcceptedReply.getAcceptInstance(1234,
new VerifierNone());
XDR out = new XDR();
reply.write(out);
ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
RpcResponse rsp = new RpcResponse(b, info.remoteAddress());
RpcUtil.sendRpcResponse(ctx, rsp);
}
@Override
protected boolean isIdempotent(RpcCall call) {
return false;
}
}
@Test
public void testSingleFrame() {
RpcFrameDecoder decoder = new RpcFrameDecoder();
// Test "Length field is not received yet"
ByteBuffer buffer = ByteBuffer.allocate(1);
ChannelBuffer buf = new ByteBufferBackedChannelBuffer(buffer);
ChannelBuffer channelBuffer = (ChannelBuffer) decoder.decode(
Mockito.mock(ChannelHandlerContext.class), Mockito.mock(Channel.class),
buf);
assertTrue(channelBuffer == null);
// Test all bytes are not received yet
byte[] fragment = new byte[4 + 9];
fragment[0] = (byte) (1 << 7); // final fragment
fragment[1] = 0;
fragment[2] = 0;
fragment[3] = (byte) 10; // fragment size = 10 bytes
assertTrue(XDR.isLastFragment(fragment));
assertTrue(XDR.fragmentSize(fragment)==10);
buffer = ByteBuffer.allocate(4 + 9);
buffer.put(fragment);
buffer.flip();
buf = new ByteBufferBackedChannelBuffer(buffer);
channelBuffer = (ChannelBuffer) decoder.decode(
Mockito.mock(ChannelHandlerContext.class), Mockito.mock(Channel.class),
buf);
assertTrue(channelBuffer == null);
}
@Test
public void testMultipleFrames() {
RpcFrameDecoder decoder = new RpcFrameDecoder();
// Test multiple frames
byte[] fragment1 = new byte[4 + 10];
fragment1[0] = 0; // not final fragment
fragment1[1] = 0;
fragment1[2] = 0;
fragment1[3] = (byte) 10; // fragment size = 10 bytes
assertFalse(XDR.isLastFragment(fragment1));
assertTrue(XDR.fragmentSize(fragment1)==10);
// decoder should wait for the final fragment
ByteBuffer buffer = ByteBuffer.allocate(4 + 10);
buffer.put(fragment1);
buffer.flip();
ChannelBuffer buf = new ByteBufferBackedChannelBuffer(buffer);
ChannelBuffer channelBuffer = (ChannelBuffer) decoder.decode(
Mockito.mock(ChannelHandlerContext.class), Mockito.mock(Channel.class),
buf);
assertTrue(channelBuffer == null);
byte[] fragment2 = new byte[4 + 10];
fragment2[0] = (byte) (1 << 7); // final fragment
fragment2[1] = 0;
fragment2[2] = 0;
fragment2[3] = (byte) 10; // fragment size = 10 bytes
assertTrue(XDR.isLastFragment(fragment2));
assertTrue(XDR.fragmentSize(fragment2)==10);
buffer = ByteBuffer.allocate(4 + 10);
buffer.put(fragment2);
buffer.flip();
buf = new ByteBufferBackedChannelBuffer(buffer);
channelBuffer = (ChannelBuffer) decoder.decode(
Mockito.mock(ChannelHandlerContext.class), Mockito.mock(Channel.class),
buf);
assertTrue(channelBuffer != null);
    // The complete frame should have a total size of 10+10=20 bytes
assertEquals(20, channelBuffer.readableBytes());
}
@Test
public void testFrames() {
int serverPort = startRpcServer(true);
XDR xdrOut = createGetportMount();
int headerSize = xdrOut.size();
int bufsize = 2 * 1024 * 1024;
byte[] buffer = new byte[bufsize];
xdrOut.writeFixedOpaque(buffer);
int requestSize = xdrOut.size() - headerSize;
// Send the request to the server
testRequest(xdrOut, serverPort);
// Verify the server got the request with right size
assertEquals(requestSize, resultSize);
}
@Test
public void testUnprivilegedPort() {
// Don't allow connections from unprivileged ports. Given that this test is
// presumably not being run by root, this will be the case.
int serverPort = startRpcServer(false);
XDR xdrOut = createGetportMount();
int bufsize = 2 * 1024 * 1024;
byte[] buffer = new byte[bufsize];
xdrOut.writeFixedOpaque(buffer);
// Send the request to the server
testRequest(xdrOut, serverPort);
// Verify the server rejected the request.
assertEquals(0, resultSize);
// Ensure that the NULL procedure does in fact succeed.
xdrOut = new XDR();
createPortmapXDRheader(xdrOut, 0);
int headerSize = xdrOut.size();
buffer = new byte[bufsize];
xdrOut.writeFixedOpaque(buffer);
int requestSize = xdrOut.size() - headerSize;
// Send the request to the server
testRequest(xdrOut, serverPort);
// Verify the server did not reject the request.
assertEquals(requestSize, resultSize);
}
private static int startRpcServer(boolean allowInsecurePorts) {
Random rand = new Random();
int serverPort = 30000 + rand.nextInt(10000);
int retries = 10; // A few retries in case initial choice is in use.
while (true) {
try {
RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
"localhost", serverPort, 100000, 1, 2, allowInsecurePorts);
SimpleTcpServer tcpServer = new SimpleTcpServer(serverPort, program, 1);
tcpServer.run();
break; // Successfully bound a port, break out.
} catch (ChannelException ce) {
if (retries-- > 0) {
serverPort += rand.nextInt(20); // Port in use? Try another.
} else {
throw ce; // Out of retries.
}
}
}
return serverPort;
}
static void createPortmapXDRheader(XDR xdr_out, int procedure) {
    // Build the RPC call header for the portmap program (program 100000, version 2)
RpcCall.getInstance(0, 100000, 2, procedure, new CredentialsNone(),
new VerifierNone()).write(xdr_out);
}
static XDR createGetportMount() {
XDR xdr_out = new XDR();
createPortmapXDRheader(xdr_out, 3);
return xdr_out;
}
/*
* static void testGetport() { XDR xdr_out = new XDR();
*
* createPortmapXDRheader(xdr_out, 3);
*
* xdr_out.writeInt(100003); xdr_out.writeInt(3); xdr_out.writeInt(6);
* xdr_out.writeInt(0);
*
* XDR request2 = new XDR();
*
* createPortmapXDRheader(xdr_out, 3); request2.writeInt(100003);
* request2.writeInt(3); request2.writeInt(6); request2.writeInt(0);
*
* testRequest(xdr_out); }
*
* static void testDump() { XDR xdr_out = new XDR();
* createPortmapXDRheader(xdr_out, 4); testRequest(xdr_out); }
*/
}
| 9,412 | 33.354015 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcCallCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.hadoop.oncrpc.RpcCallCache.CacheEntry;
import org.apache.hadoop.oncrpc.RpcCallCache.ClientRequest;
import org.junit.Test;
import static org.mockito.Mockito.*;
/**
* Unit tests for {@link RpcCallCache}
*/
public class TestRpcCallCache {
@Test(expected=IllegalArgumentException.class)
public void testRpcCallCacheConstructorIllegalArgument0(){
new RpcCallCache("test", 0);
}
@Test(expected=IllegalArgumentException.class)
public void testRpcCallCacheConstructorIllegalArgumentNegative(){
new RpcCallCache("test", -1);
}
@Test
public void testRpcCallCacheConstructor(){
RpcCallCache cache = new RpcCallCache("test", 100);
assertEquals("test", cache.getProgram());
}
@Test
public void testAddRemoveEntries() throws UnknownHostException {
RpcCallCache cache = new RpcCallCache("test", 100);
InetAddress clientIp = InetAddress.getByName("1.1.1.1");
int xid = 100;
// Ensure null is returned when there is no entry in the cache
// An entry is added to indicate the request is in progress
CacheEntry e = cache.checkOrAddToCache(clientIp, xid);
assertNull(e);
e = cache.checkOrAddToCache(clientIp, xid);
validateInprogressCacheEntry(e);
// Set call as completed
RpcResponse response = mock(RpcResponse.class);
cache.callCompleted(clientIp, xid, response);
e = cache.checkOrAddToCache(clientIp, xid);
validateCompletedCacheEntry(e, response);
}
private void validateInprogressCacheEntry(CacheEntry c) {
assertTrue(c.isInProgress());
assertFalse(c.isCompleted());
assertNull(c.getResponse());
}
private void validateCompletedCacheEntry(CacheEntry c, RpcResponse response) {
assertFalse(c.isInProgress());
assertTrue(c.isCompleted());
assertEquals(response, c.getResponse());
}
@Test
public void testCacheEntry() {
CacheEntry c = new CacheEntry();
validateInprogressCacheEntry(c);
assertTrue(c.isInProgress());
assertFalse(c.isCompleted());
assertNull(c.getResponse());
RpcResponse response = mock(RpcResponse.class);
c.setResponse(response);
validateCompletedCacheEntry(c, response);
}
@Test
public void testCacheFunctionality() throws UnknownHostException {
RpcCallCache cache = new RpcCallCache("Test", 10);
// Add 20 entries to the cache and only last 10 should be retained
int size = 0;
for (int clientId = 0; clientId < 20; clientId++) {
InetAddress clientIp = InetAddress.getByName("1.1.1."+clientId);
System.out.println("Adding " + clientIp);
cache.checkOrAddToCache(clientIp, 0);
size = Math.min(++size, 10);
System.out.println("Cache size " + cache.size());
assertEquals(size, cache.size()); // Ensure the cache size is correct
// Ensure the cache entries are correct
int startEntry = Math.max(clientId - 10 + 1, 0);
Iterator<Entry<ClientRequest, CacheEntry>> iterator = cache.iterator();
for (int i = 0; i < size; i++) {
ClientRequest key = iterator.next().getKey();
System.out.println("Entry " + key.getClientId());
assertEquals(InetAddress.getByName("1.1.1." + (startEntry + i)),
key.getClientId());
}
// Ensure cache entries are returned as in progress.
for (int i = 0; i < size; i++) {
CacheEntry e = cache.checkOrAddToCache(
InetAddress.getByName("1.1.1." + (startEntry + i)), 0);
assertNotNull(e);
assertTrue(e.isInProgress());
assertFalse(e.isCompleted());
}
}
}
}
| 4,794 | 33.746377 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcDeniedReply.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import org.apache.hadoop.oncrpc.RpcDeniedReply.RejectState;
import org.apache.hadoop.oncrpc.RpcReply.ReplyState;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.junit.Assert;
import org.junit.Test;
/**
* Test for {@link RpcDeniedReply}
*/
public class TestRpcDeniedReply {
@Test
public void testRejectStateFromValue() {
Assert.assertEquals(RejectState.RPC_MISMATCH, RejectState.fromValue(0));
Assert.assertEquals(RejectState.AUTH_ERROR, RejectState.fromValue(1));
}
@Test(expected=IndexOutOfBoundsException.class)
public void testRejectStateFromInvalidValue1() {
RejectState.fromValue(2);
}
@Test
public void testConstructor() {
RpcDeniedReply reply = new RpcDeniedReply(0, ReplyState.MSG_ACCEPTED,
RejectState.AUTH_ERROR, new VerifierNone());
Assert.assertEquals(0, reply.getXid());
Assert.assertEquals(RpcMessage.Type.RPC_REPLY, reply.getMessageType());
Assert.assertEquals(ReplyState.MSG_ACCEPTED, reply.getState());
Assert.assertEquals(RejectState.AUTH_ERROR, reply.getRejectState());
}
}
| 1,920 | 36.666667 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAcceptedReply.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
import org.apache.hadoop.oncrpc.RpcReply.ReplyState;
import org.apache.hadoop.oncrpc.security.Verifier;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.junit.Test;
/**
* Test for {@link RpcAcceptedReply}
*/
public class TestRpcAcceptedReply {
@Test
public void testAcceptState() {
assertEquals(AcceptState.SUCCESS, AcceptState.fromValue(0));
assertEquals(AcceptState.PROG_UNAVAIL, AcceptState.fromValue(1));
assertEquals(AcceptState.PROG_MISMATCH, AcceptState.fromValue(2));
assertEquals(AcceptState.PROC_UNAVAIL, AcceptState.fromValue(3));
assertEquals(AcceptState.GARBAGE_ARGS, AcceptState.fromValue(4));
assertEquals(AcceptState.SYSTEM_ERR, AcceptState.fromValue(5));
}
@Test(expected = IndexOutOfBoundsException.class)
public void testAcceptStateFromInvalidValue() {
AcceptState.fromValue(6);
}
@Test
public void testConstructor() {
Verifier verifier = new VerifierNone();
RpcAcceptedReply reply = new RpcAcceptedReply(0,
ReplyState.MSG_ACCEPTED, verifier, AcceptState.SUCCESS);
assertEquals(0, reply.getXid());
assertEquals(RpcMessage.Type.RPC_REPLY, reply.getMessageType());
assertEquals(ReplyState.MSG_ACCEPTED, reply.getState());
assertEquals(verifier, reply.getVerifier());
assertEquals(AcceptState.SUCCESS, reply.getAcceptState());
}
}
| 2,307 | 37.466667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcReply.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import org.apache.hadoop.oncrpc.RpcReply.ReplyState;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.junit.Assert;
import org.junit.Test;
/**
* Test for {@link RpcReply}
*/
public class TestRpcReply {
@Test
public void testReplyStateFromValue() {
Assert.assertEquals(ReplyState.MSG_ACCEPTED, ReplyState.fromValue(0));
Assert.assertEquals(ReplyState.MSG_DENIED, ReplyState.fromValue(1));
}
@Test(expected=IndexOutOfBoundsException.class)
public void testReplyStateFromInvalidValue1() {
ReplyState.fromValue(2);
}
@Test
public void testRpcReply() {
RpcReply reply = new RpcReply(0, ReplyState.MSG_ACCEPTED,
new VerifierNone()) {
@Override
public XDR write(XDR xdr) {
return null;
}
};
Assert.assertEquals(0, reply.getXid());
Assert.assertEquals(RpcMessage.Type.RPC_REPLY, reply.getMessageType());
Assert.assertEquals(ReplyState.MSG_ACCEPTED, reply.getState());
}
}
| 1,833 | 32.345455 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcMessage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import org.junit.Assert;
import org.junit.Test;
/**
* Test for {@link RpcMessage}
*/
public class TestRpcMessage {
private RpcMessage getRpcMessage(int xid, RpcMessage.Type msgType) {
return new RpcMessage(xid, msgType) {
@Override
public XDR write(XDR xdr) {
return null;
}
};
}
@Test
public void testRpcMessage() {
RpcMessage msg = getRpcMessage(0, RpcMessage.Type.RPC_CALL);
Assert.assertEquals(0, msg.getXid());
Assert.assertEquals(RpcMessage.Type.RPC_CALL, msg.getMessageType());
}
@Test
public void testValidateMessage() {
RpcMessage msg = getRpcMessage(0, RpcMessage.Type.RPC_CALL);
msg.validateMessageType(RpcMessage.Type.RPC_CALL);
}
@Test(expected = IllegalArgumentException.class)
public void testValidateMessageException() {
RpcMessage msg = getRpcMessage(0, RpcMessage.Type.RPC_CALL);
msg.validateMessageType(RpcMessage.Type.RPC_REPLY);
}
}
| 1,792 | 31.6 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestRpcAuthInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc.security;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.oncrpc.security.RpcAuthInfo;
import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
import org.junit.Test;
/**
* Tests for {@link RpcAuthInfo}
*/
public class TestRpcAuthInfo {
@Test
public void testAuthFlavor() {
assertEquals(AuthFlavor.AUTH_NONE, AuthFlavor.fromValue(0));
assertEquals(AuthFlavor.AUTH_SYS, AuthFlavor.fromValue(1));
assertEquals(AuthFlavor.AUTH_SHORT, AuthFlavor.fromValue(2));
assertEquals(AuthFlavor.AUTH_DH, AuthFlavor.fromValue(3));
assertEquals(AuthFlavor.RPCSEC_GSS, AuthFlavor.fromValue(6));
}
@Test(expected=IllegalArgumentException.class)
public void testInvalidAuthFlavor() {
assertEquals(AuthFlavor.AUTH_NONE, AuthFlavor.fromValue(4));
}
}
| 1,644 | 36.386364 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestCredentialsSys.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc.security;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.CredentialsSys;
import org.junit.Test;
/**
* Test for {@link CredentialsSys}
*/
public class TestCredentialsSys {
@Test
public void testReadWrite() {
CredentialsSys credential = new CredentialsSys();
credential.setUID(0);
credential.setGID(1);
XDR xdr = new XDR();
credential.write(xdr);
CredentialsSys newCredential = new CredentialsSys();
newCredential.read(xdr.asReadOnlyWrap());
assertEquals(0, newCredential.getUID());
assertEquals(1, newCredential.getGID());
}
}
| 1,505 | 31.042553 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.portmap;
import org.apache.hadoop.oncrpc.RpcAcceptedReply;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone;
/**
* Helper utility for sending portmap response.
*/
public class PortmapResponse {
public static XDR voidReply(XDR xdr, int xid) {
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
return xdr;
}
public static XDR intReply(XDR xdr, int xid, int value) {
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
xdr.writeInt(value);
return xdr;
}
public static XDR booleanReply(XDR xdr, int xid, boolean value) {
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
xdr.writeBoolean(value);
return xdr;
}
public static XDR pmapList(XDR xdr, int xid, PortmapMapping[] list) {
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
for (PortmapMapping mapping : list) {
xdr.writeBoolean(true); // Value follows
mapping.serialize(xdr);
}
xdr.writeBoolean(false); // No value follows
return xdr;
}
}
| 1,940 | 34.290909 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.portmap;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcAcceptedReply;
import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.oncrpc.RpcInfo;
import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.RpcResponse;
import org.apache.hadoop.oncrpc.RpcUtil;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.group.ChannelGroup;
import org.jboss.netty.handler.timeout.IdleState;
import org.jboss.netty.handler.timeout.IdleStateAwareChannelUpstreamHandler;
import org.jboss.netty.handler.timeout.IdleStateEvent;
final class RpcProgramPortmap extends IdleStateAwareChannelUpstreamHandler {
static final int PROGRAM = 100000;
static final int VERSION = 2;
static final int PMAPPROC_NULL = 0;
static final int PMAPPROC_SET = 1;
static final int PMAPPROC_UNSET = 2;
static final int PMAPPROC_GETPORT = 3;
static final int PMAPPROC_DUMP = 4;
static final int PMAPPROC_GETVERSADDR = 9;
private static final Log LOG = LogFactory.getLog(RpcProgramPortmap.class);
private final ConcurrentHashMap<String, PortmapMapping> map = new ConcurrentHashMap<String, PortmapMapping>();
  /** ChannelGroup that remembers all active channels for graceful shutdown. */
private final ChannelGroup allChannels;
RpcProgramPortmap(ChannelGroup allChannels) {
this.allChannels = allChannels;
PortmapMapping m = new PortmapMapping(PROGRAM, VERSION,
PortmapMapping.TRANSPORT_TCP, RpcProgram.RPCB_PORT);
PortmapMapping m1 = new PortmapMapping(PROGRAM, VERSION,
PortmapMapping.TRANSPORT_UDP, RpcProgram.RPCB_PORT);
map.put(PortmapMapping.key(m), m);
map.put(PortmapMapping.key(m1), m1);
}
/**
* This procedure does no work. By convention, procedure zero of any protocol
* takes no parameters and returns no results.
*/
private XDR nullOp(int xid, XDR in, XDR out) {
return PortmapResponse.voidReply(out, xid);
}
/**
* When a program first becomes available on a machine, it registers itself
* with the port mapper program on the same machine. The program passes its
* program number "prog", version number "vers", transport protocol number
* "prot", and the port "port" on which it awaits service request. The
* procedure returns a boolean reply whose value is "TRUE" if the procedure
* successfully established the mapping and "FALSE" otherwise. The procedure
* refuses to establish a mapping if one already exists for the tuple
* "(prog, vers, prot)".
*/
private XDR set(int xid, XDR in, XDR out) {
PortmapMapping mapping = PortmapRequest.mapping(in);
String key = PortmapMapping.key(mapping);
if (LOG.isDebugEnabled()) {
LOG.debug("Portmap set key=" + key);
}
map.put(key, mapping);
return PortmapResponse.intReply(out, xid, mapping.getPort());
}
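  // Hypothetical client-side sketch, not part of the original handler: an RPC
  // program could build the registration request handled by set() above using
  // PortmapRequest.create with a PortmapMapping for its bound port. The port
  // number 2049 is only an illustrative value.
  private static XDR exampleSetRequest() {
    PortmapMapping mapping = new PortmapMapping(PROGRAM, VERSION,
        PortmapMapping.TRANSPORT_TCP, 2049);
    return PortmapRequest.create(mapping, true); // true selects PMAPPROC_SET
  }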
/**
* When a program becomes unavailable, it should unregister itself with the
* port mapper program on the same machine. The parameters and results have
* meanings identical to those of "PMAPPROC_SET". The protocol and port number
* fields of the argument are ignored.
*/
private XDR unset(int xid, XDR in, XDR out) {
PortmapMapping mapping = PortmapRequest.mapping(in);
String key = PortmapMapping.key(mapping);
if (LOG.isDebugEnabled())
LOG.debug("Portmap remove key=" + key);
map.remove(key);
return PortmapResponse.booleanReply(out, xid, true);
}
/**
* Given a program number "prog", version number "vers", and transport
* protocol number "prot", this procedure returns the port number on which the
   * program is awaiting call requests. A port value of zero means the program
* has not been registered. The "port" field of the argument is ignored.
*/
private XDR getport(int xid, XDR in, XDR out) {
PortmapMapping mapping = PortmapRequest.mapping(in);
String key = PortmapMapping.key(mapping);
if (LOG.isDebugEnabled()) {
LOG.debug("Portmap GETPORT key=" + key + " " + mapping);
}
PortmapMapping value = map.get(key);
int res = 0;
if (value != null) {
res = value.getPort();
if (LOG.isDebugEnabled()) {
LOG.debug("Found mapping for key: " + key + " port:" + res);
}
} else {
LOG.warn("Warning, no mapping for key: " + key);
}
return PortmapResponse.intReply(out, xid, res);
}
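  // Hypothetical sketch, not part of the original handler: the argument body
  // that getport() above expects is simply the four-integer mapping tuple,
  // with the port field ignored. The values shown are illustrative only.
  private static XDR exampleGetportArgs(int prog, int vers, int prot) {
    XDR args = new XDR();
    args.writeInt(prog); // program number, e.g. 100003 for NFS
    args.writeInt(vers); // version number
    args.writeInt(prot); // transport, PortmapMapping.TRANSPORT_TCP or TRANSPORT_UDP
    args.writeInt(0);    // port is ignored for GETPORT
    return args;
  }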
/**
* This procedure enumerates all entries in the port mapper's database. The
* procedure takes no parameters and returns a list of program, version,
* protocol, and port values.
*/
private XDR dump(int xid, XDR in, XDR out) {
PortmapMapping[] pmapList = map.values().toArray(new PortmapMapping[0]);
return PortmapResponse.pmapList(out, xid, pmapList);
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
throws Exception {
RpcInfo info = (RpcInfo) e.getMessage();
RpcCall rpcCall = (RpcCall) info.header();
final int portmapProc = rpcCall.getProcedure();
int xid = rpcCall.getXid();
XDR in = new XDR(info.data().toByteBuffer().asReadOnlyBuffer(),
XDR.State.READING);
XDR out = new XDR();
if (portmapProc == PMAPPROC_NULL) {
out = nullOp(xid, in, out);
} else if (portmapProc == PMAPPROC_SET) {
out = set(xid, in, out);
} else if (portmapProc == PMAPPROC_UNSET) {
out = unset(xid, in, out);
} else if (portmapProc == PMAPPROC_DUMP) {
out = dump(xid, in, out);
} else if (portmapProc == PMAPPROC_GETPORT) {
out = getport(xid, in, out);
} else if (portmapProc == PMAPPROC_GETVERSADDR) {
out = getport(xid, in, out);
} else {
LOG.info("PortmapHandler unknown rpc procedure=" + portmapProc);
RpcAcceptedReply reply = RpcAcceptedReply.getInstance(xid,
RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone());
reply.write(out);
}
ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
.buffer());
RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
RpcUtil.sendRpcResponse(ctx, rsp);
}
@Override
public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent e)
throws Exception {
allChannels.add(e.getChannel());
}
@Override
public void channelIdle(ChannelHandlerContext ctx, IdleStateEvent e)
throws Exception {
if (e.getState() == IdleState.ALL_IDLE) {
e.getChannel().close();
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
LOG.warn("Encountered ", e.getCause());
e.getChannel().close();
}
}
| 7,893 | 36.770335 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.portmap;
import org.apache.hadoop.oncrpc.XDR;
/**
 * Represents a mapping entry in the Portmap service for binding RPC
* protocols. See RFC 1833 for details.
*
* This maps a program to a port number.
*/
public class PortmapMapping {
public static final int TRANSPORT_TCP = 6;
public static final int TRANSPORT_UDP = 17;
private final int program;
private final int version;
private final int transport;
private final int port;
public PortmapMapping(int program, int version, int transport, int port) {
this.program = program;
this.version = version;
this.transport = transport;
this.port = port;
}
public XDR serialize(XDR xdr) {
xdr.writeInt(program);
xdr.writeInt(version);
xdr.writeInt(transport);
xdr.writeInt(port);
return xdr;
}
public static PortmapMapping deserialize(XDR xdr) {
return new PortmapMapping(xdr.readInt(), xdr.readInt(), xdr.readInt(),
xdr.readInt());
}
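  // Illustrative round trip, not part of the original class: a mapping written
  // with serialize() can be read back with deserialize(). The program, version
  // and port values are arbitrary examples.
  static PortmapMapping roundTripExample() {
    PortmapMapping original = new PortmapMapping(100003, 3, TRANSPORT_TCP, 2049);
    XDR xdr = new XDR();
    original.serialize(xdr);
    return PortmapMapping.deserialize(xdr.asReadOnlyWrap());
  }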
public int getPort() {
return port;
}
public static String key(PortmapMapping mapping) {
return mapping.program + " " + mapping.version + " " + mapping.transport;
}
@Override
public String toString() {
return String.format("(PortmapMapping-%d:%d:%d:%d)", program, version,
transport, port);
}
}
| 2,130 | 29.014085 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.portmap;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.RpcUtil;
import org.apache.hadoop.util.StringUtils;
import org.jboss.netty.bootstrap.ConnectionlessBootstrap;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.group.ChannelGroup;
import org.jboss.netty.channel.group.DefaultChannelGroup;
import org.jboss.netty.channel.socket.nio.NioDatagramChannelFactory;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.handler.timeout.IdleStateHandler;
import org.jboss.netty.util.HashedWheelTimer;
import com.google.common.annotations.VisibleForTesting;
/**
* Portmap service for binding RPC protocols. See RFC 1833 for details.
*/
final class Portmap {
private static final Log LOG = LogFactory.getLog(Portmap.class);
private static final int DEFAULT_IDLE_TIME_MILLISECONDS = 5000;
private ConnectionlessBootstrap udpServer;
private ServerBootstrap tcpServer;
private ChannelGroup allChannels = new DefaultChannelGroup();
private Channel udpChannel;
private Channel tcpChannel;
private final RpcProgramPortmap handler = new RpcProgramPortmap(allChannels);
public static void main(String[] args) {
StringUtils.startupShutdownMessage(Portmap.class, args, LOG);
final int port = RpcProgram.RPCB_PORT;
Portmap pm = new Portmap();
try {
pm.start(DEFAULT_IDLE_TIME_MILLISECONDS,
new InetSocketAddress(port), new InetSocketAddress(port));
} catch (Throwable e) {
LOG.fatal("Failed to start the server. Cause:", e);
pm.shutdown();
System.exit(-1);
}
}
void shutdown() {
allChannels.close().awaitUninterruptibly();
tcpServer.releaseExternalResources();
udpServer.releaseExternalResources();
}
@VisibleForTesting
SocketAddress getTcpServerLocalAddress() {
return tcpChannel.getLocalAddress();
}
@VisibleForTesting
SocketAddress getUdpServerLoAddress() {
return udpChannel.getLocalAddress();
}
@VisibleForTesting
RpcProgramPortmap getHandler() {
return handler;
}
void start(final int idleTimeMilliSeconds, final SocketAddress tcpAddress,
final SocketAddress udpAddress) {
tcpServer = new ServerBootstrap(new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));
tcpServer.setPipelineFactory(new ChannelPipelineFactory() {
private final HashedWheelTimer timer = new HashedWheelTimer();
private final IdleStateHandler idleStateHandler = new IdleStateHandler(
timer, 0, 0, idleTimeMilliSeconds, TimeUnit.MILLISECONDS);
@Override
public ChannelPipeline getPipeline() throws Exception {
return Channels.pipeline(RpcUtil.constructRpcFrameDecoder(),
RpcUtil.STAGE_RPC_MESSAGE_PARSER, idleStateHandler, handler,
RpcUtil.STAGE_RPC_TCP_RESPONSE);
}
});
udpServer = new ConnectionlessBootstrap(new NioDatagramChannelFactory(
Executors.newCachedThreadPool()));
udpServer.setPipeline(Channels.pipeline(RpcUtil.STAGE_RPC_MESSAGE_PARSER,
handler, RpcUtil.STAGE_RPC_UDP_RESPONSE));
tcpChannel = tcpServer.bind(tcpAddress);
udpChannel = udpServer.bind(udpAddress);
allChannels.add(tcpChannel);
allChannels.add(udpChannel);
LOG.info("Portmap server started at tcp://" + tcpChannel.getLocalAddress()
+ ", udp://" + udpChannel.getLocalAddress());
}
}
| 4,711 | 35.8125 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.portmap;
import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.oncrpc.RpcUtil;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.CredentialsNone;
import org.apache.hadoop.oncrpc.security.VerifierNone;
/**
* Helper utility for building portmap request
*/
public class PortmapRequest {
public static PortmapMapping mapping(XDR xdr) {
return PortmapMapping.deserialize(xdr);
}
public static XDR create(PortmapMapping mapping, boolean set) {
XDR request = new XDR();
int procedure = set ? RpcProgramPortmap.PMAPPROC_SET
: RpcProgramPortmap.PMAPPROC_UNSET;
RpcCall call = RpcCall.getInstance(
RpcUtil.getNewXid(String.valueOf(RpcProgramPortmap.PROGRAM)),
RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION, procedure,
new CredentialsNone(), new VerifierNone());
call.write(request);
return mapping.serialize(request);
}
}
| 1,761 | 37.304348 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/AccessPrivilege.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs;
public enum AccessPrivilege {
READ_ONLY,
READ_WRITE,
NONE;
}
| 905 | 36.75 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsTime.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs;
import org.apache.hadoop.oncrpc.XDR;
/**
* Class that encapsulates time.
*/
public class NfsTime {
static final int MILLISECONDS_IN_SECOND = 1000;
static final int NANOSECONDS_IN_MILLISECOND = 1000000;
private final int seconds;
private final int nseconds;
public NfsTime(int seconds, int nseconds) {
this.seconds = seconds;
this.nseconds = nseconds;
}
public NfsTime(NfsTime other) {
    seconds = other.getSeconds();
nseconds = other.getNseconds();
}
public NfsTime(long milliseconds) {
seconds = (int) (milliseconds / MILLISECONDS_IN_SECOND);
nseconds = (int) ((milliseconds - this.seconds * MILLISECONDS_IN_SECOND) *
NANOSECONDS_IN_MILLISECOND);
}
public int getSeconds() {
return seconds;
}
public int getNseconds() {
return nseconds;
}
/**
* Get the total time in milliseconds
*/
public long getMilliSeconds() {
return (long) (seconds) * 1000 + (long) (nseconds) / 1000000;
}
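  // Illustrative sketch, not part of the original class: the millisecond round
  // trip implied by the long constructor and getMilliSeconds() above. 1500 ms
  // is stored as 1 second plus 500,000,000 nanoseconds.
  static long millisRoundTripExample() {
    NfsTime t = new NfsTime(1500L);
    return t.getMilliSeconds(); // returns 1500 again
  }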
public void serialize(XDR xdr) {
xdr.writeInt(getSeconds());
xdr.writeInt(getNseconds());
}
public static NfsTime deserialize(XDR xdr) {
return new NfsTime(xdr.readInt(), xdr.readInt());
}
@Override
public int hashCode() {
return seconds ^ nseconds;
}
@Override
public boolean equals(Object o) {
if (!(o instanceof NfsTime)) {
return false;
}
return ((NfsTime) o).getMilliSeconds() == this.getMilliSeconds();
}
@Override
public String toString() {
return "(NfsTime-" + seconds + "s, " + nseconds + "ns)";
}
}
| 2,398 | 25.955056 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsFileType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs;
/**
* Class encapsulates different types of files
*/
public enum NfsFileType {
NFSREG(1), // a regular file
NFSDIR(2), // a directory
NFSBLK(3), // a block special device file
NFSCHR(4), // a character special device
NFSLNK(5), // a symbolic link
NFSSOCK(6), // a socket
NFSFIFO(7); // a named pipe
private final int value;
NfsFileType(int val) {
value = val;
}
public int toValue() {
return value;
}
}
| 1,304 | 30.071429 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.net.util.SubnetUtils;
import org.apache.commons.net.util.SubnetUtils.SubnetInfo;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.util.LightWeightCache;
import org.apache.hadoop.util.LightWeightGSet;
import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Preconditions;
/**
* This class provides functionality for loading and checking the mapping
* between client hosts and their access privileges.
*/
public class NfsExports {
private static NfsExports exports = null;
public static synchronized NfsExports getInstance(Configuration conf) {
if (exports == null) {
String matchHosts = conf.get(
CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY,
CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT);
int cacheSize = conf.getInt(Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_KEY,
Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_DEFAULT);
long expirationPeriodNano = conf.getLong(
Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
try {
exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
} catch (IllegalArgumentException e) {
LOG.error("Invalid NFS Exports provided: ", e);
return exports;
}
}
return exports;
}
public static final Log LOG = LogFactory.getLog(NfsExports.class);
// only support IPv4 now
private static final String IP_ADDRESS =
"(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})";
private static final String SLASH_FORMAT_SHORT = IP_ADDRESS + "/(\\d{1,3})";
private static final String SLASH_FORMAT_LONG = IP_ADDRESS + "/" + IP_ADDRESS;
private static final Pattern CIDR_FORMAT_SHORT =
Pattern.compile(SLASH_FORMAT_SHORT);
private static final Pattern CIDR_FORMAT_LONG =
Pattern.compile(SLASH_FORMAT_LONG);
  // Hostnames are composed of a series of 'labels' concatenated with dots.
  // Labels can be between 1-63 characters long, and can only contain
  // letters, digits & hyphens. They cannot start or end with hyphens. For
// more details, refer RFC-1123 & http://en.wikipedia.org/wiki/Hostname
private static final String LABEL_FORMAT =
"[a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?";
private static final Pattern HOSTNAME_FORMAT =
Pattern.compile("^(" + LABEL_FORMAT + "\\.)*" + LABEL_FORMAT + "$");
static class AccessCacheEntry implements LightWeightCache.Entry{
private final String hostAddr;
private AccessPrivilege access;
private final long expirationTime;
private LightWeightGSet.LinkedElement next;
AccessCacheEntry(String hostAddr, AccessPrivilege access,
long expirationTime) {
Preconditions.checkArgument(hostAddr != null);
this.hostAddr = hostAddr;
this.access = access;
this.expirationTime = expirationTime;
}
@Override
public int hashCode() {
return hostAddr.hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof AccessCacheEntry) {
AccessCacheEntry entry = (AccessCacheEntry) obj;
return this.hostAddr.equals(entry.hostAddr);
}
return false;
}
@Override
public void setNext(LinkedElement next) {
this.next = next;
}
@Override
public LinkedElement getNext() {
return this.next;
}
@Override
public void setExpirationTime(long timeNano) {
// we set expiration time in the constructor, and the expiration time
// does not change
}
@Override
public long getExpirationTime() {
return this.expirationTime;
}
}
private final List<Match> mMatches;
private final LightWeightCache<AccessCacheEntry, AccessCacheEntry> accessCache;
private final long cacheExpirationPeriod;
/**
* Constructor.
* @param cacheSize The size of the access privilege cache.
   * @param expirationPeriodNano The expiration period for cache entries, in nanoseconds.
   * @param matchHosts A string specifying one or multiple matchers.
*/
NfsExports(int cacheSize, long expirationPeriodNano, String matchHosts) {
this.cacheExpirationPeriod = expirationPeriodNano;
accessCache = new LightWeightCache<AccessCacheEntry, AccessCacheEntry>(
cacheSize, cacheSize, expirationPeriodNano, 0);
String[] matchStrings = matchHosts.split(
CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR);
mMatches = new ArrayList<Match>(matchStrings.length);
for(String mStr : matchStrings) {
if (LOG.isDebugEnabled()) {
LOG.debug("Processing match string '" + mStr + "'");
}
mStr = mStr.trim();
if(!mStr.isEmpty()) {
mMatches.add(getMatch(mStr));
}
}
}
/**
* Return the configured group list
*/
public String[] getHostGroupList() {
int listSize = mMatches.size();
String[] hostGroups = new String[listSize];
for (int i = 0; i < mMatches.size(); i++) {
hostGroups[i] = mMatches.get(i).getHostGroup();
}
return hostGroups;
}
public AccessPrivilege getAccessPrivilege(InetAddress addr) {
return getAccessPrivilege(addr.getHostAddress(),
addr.getCanonicalHostName());
}
AccessPrivilege getAccessPrivilege(String address, String hostname) {
long now = System.nanoTime();
AccessCacheEntry newEntry = new AccessCacheEntry(address,
AccessPrivilege.NONE, now + this.cacheExpirationPeriod);
// check if there is a cache entry for the given address
AccessCacheEntry cachedEntry = accessCache.get(newEntry);
if (cachedEntry != null && now < cachedEntry.expirationTime) {
// get a non-expired cache entry, use it
return cachedEntry.access;
} else {
for(Match match : mMatches) {
if(match.isIncluded(address, hostname)) {
if (match.accessPrivilege == AccessPrivilege.READ_ONLY) {
newEntry.access = AccessPrivilege.READ_ONLY;
break;
} else if (match.accessPrivilege == AccessPrivilege.READ_WRITE) {
newEntry.access = AccessPrivilege.READ_WRITE;
}
}
}
accessCache.put(newEntry);
return newEntry.access;
}
}
private static abstract class Match {
private final AccessPrivilege accessPrivilege;
private Match(AccessPrivilege accessPrivilege) {
this.accessPrivilege = accessPrivilege;
}
public abstract boolean isIncluded(String address, String hostname);
public abstract String getHostGroup();
}
/**
* Matcher covering all client hosts (specified by "*")
*/
private static class AnonymousMatch extends Match {
private AnonymousMatch(AccessPrivilege accessPrivilege) {
super(accessPrivilege);
}
@Override
public boolean isIncluded(String address, String hostname) {
return true;
}
@Override
public String getHostGroup() {
return "*";
}
}
/**
* Matcher using CIDR for client host matching
*/
private static class CIDRMatch extends Match {
private final SubnetInfo subnetInfo;
private CIDRMatch(AccessPrivilege accessPrivilege, SubnetInfo subnetInfo) {
super(accessPrivilege);
this.subnetInfo = subnetInfo;
}
@Override
public boolean isIncluded(String address, String hostname) {
if(subnetInfo.isInRange(address)) {
if(LOG.isDebugEnabled()) {
LOG.debug("CIDRNMatcher low = " + subnetInfo.getLowAddress() +
", high = " + subnetInfo.getHighAddress() +
", allowing client '" + address + "', '" + hostname + "'");
}
return true;
}
if(LOG.isDebugEnabled()) {
LOG.debug("CIDRNMatcher low = " + subnetInfo.getLowAddress() +
", high = " + subnetInfo.getHighAddress() +
", denying client '" + address + "', '" + hostname + "'");
}
return false;
}
@Override
public String getHostGroup() {
return subnetInfo.getAddress() + "/" + subnetInfo.getNetmask();
}
}
/**
* Matcher requiring exact string match for client host
*/
private static class ExactMatch extends Match {
private final String ipOrHost;
private ExactMatch(AccessPrivilege accessPrivilege, String ipOrHost) {
super(accessPrivilege);
this.ipOrHost = ipOrHost;
}
@Override
public boolean isIncluded(String address, String hostname) {
if(ipOrHost.equalsIgnoreCase(address) ||
ipOrHost.equalsIgnoreCase(hostname)) {
if(LOG.isDebugEnabled()) {
LOG.debug("ExactMatcher '" + ipOrHost + "', allowing client " +
"'" + address + "', '" + hostname + "'");
}
return true;
}
if(LOG.isDebugEnabled()) {
LOG.debug("ExactMatcher '" + ipOrHost + "', denying client " +
"'" + address + "', '" + hostname + "'");
}
return false;
}
@Override
public String getHostGroup() {
return ipOrHost;
}
}
/**
* Matcher where client hosts are specified by regular expression
*/
private static class RegexMatch extends Match {
private final Pattern pattern;
private RegexMatch(AccessPrivilege accessPrivilege, String wildcard) {
super(accessPrivilege);
this.pattern = Pattern.compile(wildcard, Pattern.CASE_INSENSITIVE);
}
@Override
public boolean isIncluded(String address, String hostname) {
if (pattern.matcher(address).matches()
|| pattern.matcher(hostname).matches()) {
if (LOG.isDebugEnabled()) {
LOG.debug("RegexMatcher '" + pattern.pattern()
+ "', allowing client '" + address + "', '" + hostname + "'");
}
return true;
}
if (LOG.isDebugEnabled()) {
LOG.debug("RegexMatcher '" + pattern.pattern()
+ "', denying client '" + address + "', '" + hostname + "'");
}
return false;
}
@Override
public String getHostGroup() {
return pattern.toString();
}
}
/**
* Loading a matcher from a string. The default access privilege is read-only.
* The string contains 1 or 2 parts, separated by whitespace characters, where
* the first part specifies the client hosts, and the second part (if
* existent) specifies the access privilege of the client hosts. I.e.,
*
* "client-hosts [access-privilege]"
*/
private static Match getMatch(String line) {
String[] parts = line.split("\\s+");
final String host;
AccessPrivilege privilege = AccessPrivilege.READ_ONLY;
switch (parts.length) {
case 1:
host = StringUtils.toLowerCase(parts[0]).trim();
break;
case 2:
host = StringUtils.toLowerCase(parts[0]).trim();
String option = parts[1].trim();
if ("rw".equalsIgnoreCase(option)) {
privilege = AccessPrivilege.READ_WRITE;
}
break;
default:
throw new IllegalArgumentException("Incorrectly formatted line '" + line
+ "'");
}
if (host.equals("*")) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using match all for '" + host + "' and " + privilege);
}
return new AnonymousMatch(privilege);
} else if (CIDR_FORMAT_SHORT.matcher(host).matches()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using CIDR match for '" + host + "' and " + privilege);
}
return new CIDRMatch(privilege, new SubnetUtils(host).getInfo());
} else if (CIDR_FORMAT_LONG.matcher(host).matches()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using CIDR match for '" + host + "' and " + privilege);
}
String[] pair = host.split("/");
return new CIDRMatch(privilege,
new SubnetUtils(pair[0], pair[1]).getInfo());
} else if (host.contains("*") || host.contains("?") || host.contains("[")
|| host.contains("]") || host.contains("(") || host.contains(")")) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using Regex match for '" + host + "' and " + privilege);
}
return new RegexMatch(privilege, host);
} else if (HOSTNAME_FORMAT.matcher(host).matches()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using exact match for '" + host + "' and " + privilege);
}
return new ExactMatch(privilege, host);
} else {
throw new IllegalArgumentException("Invalid hostname provided '" + host
+ "'");
}
}
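  // Hypothetical sketch, not part of the original class: example export
  // strings in the "client-hosts [access-privilege]" form accepted by
  // getMatch() above, and the matcher each one would produce. All hosts and
  // addresses are made-up illustrations.
  private static void exampleMatches() {
    Match all = getMatch("*");                         // AnonymousMatch, read-only
    Match cidr = getMatch("192.168.0.0/22 rw");        // CIDRMatch, read-write
    Match exact = getMatch("host1.example.com rw");    // ExactMatch, read-write
    Match regex = getMatch("[a-z]+\\.example\\.com");  // RegexMatch, read-only
  }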
}
| 13,763 | 32.570732 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager;
import static org.apache.hadoop.util.ExitUtil.terminate;
/**
* Nfs server. Supports NFS v3 using {@link RpcProgram}.
 * Only TCP is supported; UDP is not.
*/
public abstract class Nfs3Base {
public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
private final RpcProgram rpcProgram;
  private int nfsBoundPort; // Will be set after the server starts
public RpcProgram getRpcProgram() {
return rpcProgram;
}
protected Nfs3Base(RpcProgram rpcProgram, Configuration conf) {
this.rpcProgram = rpcProgram;
LOG.info("NFS server port set to: " + rpcProgram.getPort());
}
public void start(boolean register) {
startTCPServer(); // Start TCP server
if (register) {
ShutdownHookManager.get().addShutdownHook(new NfsShutdownHook(),
SHUTDOWN_HOOK_PRIORITY);
try {
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
} catch (Throwable e) {
LOG.fatal("Failed to register the NFSv3 service.", e);
terminate(1, e);
}
}
}
private void startTCPServer() {
SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
rpcProgram, 0);
rpcProgram.startDaemons();
try {
tcpServer.run();
} catch (Throwable e) {
LOG.fatal("Failed to start the TCP server.", e);
if (tcpServer.getBoundPort() > 0) {
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP,
tcpServer.getBoundPort());
}
tcpServer.shutdown();
terminate(1, e);
}
nfsBoundPort = tcpServer.getBoundPort();
}
/**
* Priority of the nfsd shutdown hook.
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 10;
private class NfsShutdownHook implements Runnable {
@Override
public synchronized void run() {
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
rpcProgram.stopDaemons();
}
}
}
| 3,082 | 31.797872 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3;
/**
* Some constants for NFSv3
*/
public class Nfs3Constant {
// The local rpcbind/portmapper port.
public final static int SUN_RPCBIND = 111;
// The RPC program number for NFS.
public final static int PROGRAM = 100003;
// The program version number that this server implements.
public final static int VERSION = 3;
// The procedures
public static enum NFSPROC3 {
    // the order of the values below is significant.
NULL,
GETATTR,
SETATTR,
LOOKUP,
ACCESS,
READLINK,
READ,
WRITE,
CREATE(false),
MKDIR(false),
SYMLINK(false),
MKNOD(false),
REMOVE(false),
RMDIR(false),
RENAME(false),
LINK(false),
READDIR,
READDIRPLUS,
FSSTAT,
FSINFO,
PATHCONF,
COMMIT;
private final boolean isIdempotent;
private NFSPROC3(boolean isIdempotent) {
this.isIdempotent = isIdempotent;
}
private NFSPROC3() {
this(true);
}
public boolean isIdempotent() {
return isIdempotent;
}
/** @return the int value representing the procedure. */
public int getValue() {
return ordinal();
}
/** @return the procedure corresponding to the value. */
public static NFSPROC3 fromValue(int value) {
if (value < 0 || value >= values().length) {
return null;
}
return values()[value];
}
}
// The maximum size in bytes of the opaque file handle.
public final static int NFS3_FHSIZE = 64;
// The byte size of cookie verifier passed by READDIR and READDIRPLUS.
public final static int NFS3_COOKIEVERFSIZE = 8;
// The size in bytes of the opaque verifier used for exclusive CREATE.
public final static int NFS3_CREATEVERFSIZE = 8;
// The size in bytes of the opaque verifier used for asynchronous WRITE.
public final static int NFS3_WRITEVERFSIZE = 8;
/** Access call request mode */
// File access mode
public static final int ACCESS_MODE_READ = 0x04;
public static final int ACCESS_MODE_WRITE = 0x02;
public static final int ACCESS_MODE_EXECUTE = 0x01;
/** Access call response rights */
// Read data from file or read a directory.
public final static int ACCESS3_READ = 0x0001;
// Look up a name in a directory (no meaning for non-directory objects).
public final static int ACCESS3_LOOKUP = 0x0002;
// Rewrite existing file data or modify existing directory entries.
public final static int ACCESS3_MODIFY = 0x0004;
// Write new data or add directory entries.
public final static int ACCESS3_EXTEND = 0x0008;
// Delete an existing directory entry.
public final static int ACCESS3_DELETE = 0x0010;
// Execute file (no meaning for a directory).
public final static int ACCESS3_EXECUTE = 0x0020;
/** File and directory attribute mode bits */
// Set user ID on execution.
public final static int MODE_S_ISUID = 0x00800;
// Set group ID on execution.
public final static int MODE_S_ISGID = 0x00400;
// Save swapped text (not defined in POSIX).
public final static int MODE_S_ISVTX = 0x00200;
// Read permission for owner.
public final static int MODE_S_IRUSR = 0x00100;
// Write permission for owner.
public final static int MODE_S_IWUSR = 0x00080;
// Execute permission for owner on a file. Or lookup (search) permission for
// owner in directory.
public final static int MODE_S_IXUSR = 0x00040;
// Read permission for group.
public final static int MODE_S_IRGRP = 0x00020;
// Write permission for group.
public final static int MODE_S_IWGRP = 0x00010;
// Execute permission for group on a file. Or lookup (search) permission for
// group in directory.
public final static int MODE_S_IXGRP = 0x00008;
// Read permission for others.
public final static int MODE_S_IROTH = 0x00004;
// Write permission for others.
public final static int MODE_S_IWOTH = 0x00002;
// Execute permission for others on a file. Or lookup (search) permission for
// others in directory.
public final static int MODE_S_IXOTH = 0x00001;
  public final static int MODE_ALL = MODE_S_ISUID | MODE_S_ISGID | MODE_S_ISVTX
      | MODE_S_IRUSR | MODE_S_IWUSR | MODE_S_IXUSR
      | MODE_S_IRGRP | MODE_S_IWGRP | MODE_S_IXGRP
      | MODE_S_IROTH | MODE_S_IWOTH | MODE_S_IXOTH;
/** Write call flavors */
public enum WriteStableHow {
    // the order of the values below is significant.
UNSTABLE,
DATA_SYNC,
FILE_SYNC;
public int getValue() {
return ordinal();
}
public static WriteStableHow fromValue(int id) {
return values()[id];
}
}
/**
* This is a cookie that the client can use to determine whether the server
* has changed state between a call to WRITE and a subsequent call to either
* WRITE or COMMIT. This cookie must be consistent during a single instance of
* the NFS version 3 protocol service and must be unique between instances of
* the NFS version 3 protocol server, where uncommitted data may be lost.
*/
public final static long WRITE_COMMIT_VERF = System.currentTimeMillis();
/** FileSystemProperties */
public final static int FSF3_LINK = 0x0001;
public final static int FSF3_SYMLINK = 0x0002;
public final static int FSF3_HOMOGENEOUS = 0x0008;
public final static int FSF3_CANSETTIME = 0x0010;
/** Create options */
public final static int CREATE_UNCHECKED = 0;
public final static int CREATE_GUARDED = 1;
public final static int CREATE_EXCLUSIVE = 2;
/** Size for nfs exports cache */
public static final String NFS_EXPORTS_CACHE_SIZE_KEY = "nfs.exports.cache.size";
public static final int NFS_EXPORTS_CACHE_SIZE_DEFAULT = 512;
/** Expiration time for nfs exports cache entry */
public static final String NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "nfs.exports.cache.expirytime.millis";
public static final long NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min
}
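// Illustrative usage sketch (not part of the original class): shows how a raw
// procedure number from an RPC call maps onto NFSPROC3 and how an ACCESS3
// rights mask is decoded. The procedure value and the rights mask are
// arbitrary example inputs.
class Nfs3ConstantExample {
  public static void main(String[] args) {
    // Procedure 6 is READ, given the enum ordering above.
    Nfs3Constant.NFSPROC3 proc = Nfs3Constant.NFSPROC3.fromValue(6);
    System.out.println(proc + " idempotent=" + proc.isIdempotent());
    // Decode a rights mask as returned by an ACCESS reply.
    int rights = Nfs3Constant.ACCESS3_READ | Nfs3Constant.ACCESS3_LOOKUP;
    boolean canRead = (rights & Nfs3Constant.ACCESS3_READ) != 0;
    boolean canModify = (rights & Nfs3Constant.ACCESS3_MODIFY) != 0;
    System.out.println("read=" + canRead + " modify=" + canModify);
  }
}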
| 6,754 | 33.28934 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/FileHandle.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3;
import java.nio.ByteBuffer;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.XDR;
/**
 * This is a file handle used by NFS clients.
* Server returns this handle to the client, which is used by the client
* on subsequent operations to reference the file.
*/
public class FileHandle {
private static final Log LOG = LogFactory.getLog(FileHandle.class);
private static final String HEXES = "0123456789abcdef";
private static final int HANDLE_LEN = 32;
private byte[] handle; // Opaque handle
private long fileId = -1;
public FileHandle() {
handle = null;
}
/**
   * Handle is a 32-byte number. For HDFS, the first 8 bytes hold the fileId.
*/
public FileHandle(long v) {
fileId = v;
handle = new byte[HANDLE_LEN];
handle[0] = (byte)(v >>> 56);
handle[1] = (byte)(v >>> 48);
handle[2] = (byte)(v >>> 40);
handle[3] = (byte)(v >>> 32);
handle[4] = (byte)(v >>> 24);
handle[5] = (byte)(v >>> 16);
handle[6] = (byte)(v >>> 8);
handle[7] = (byte)(v >>> 0);
for (int i = 8; i < HANDLE_LEN; i++) {
handle[i] = (byte) 0;
}
}
public FileHandle(String s) {
MessageDigest digest;
try {
digest = MessageDigest.getInstance("MD5");
handle = new byte[HANDLE_LEN];
} catch (NoSuchAlgorithmException e) {
LOG.warn("MD5 MessageDigest unavailable.");
handle = null;
return;
}
byte[] in = s.getBytes(Charsets.UTF_8);
digest.update(in);
byte[] digestbytes = digest.digest();
for (int i = 0; i < 16; i++) {
handle[i] = (byte) 0;
}
for (int i = 16; i < 32; i++) {
handle[i] = digestbytes[i - 16];
}
}
public boolean serialize(XDR out) {
out.writeInt(handle.length);
out.writeFixedOpaque(handle);
return true;
}
private long bytesToLong(byte[] data) {
ByteBuffer buffer = ByteBuffer.allocate(8);
for (int i = 0; i < 8; i++) {
buffer.put(data[i]);
}
buffer.flip();// need flip
return buffer.getLong();
}
public boolean deserialize(XDR xdr) {
if (!XDR.verifyLength(xdr, 32)) {
return false;
}
int size = xdr.readInt();
handle = xdr.readFixedOpaque(size);
fileId = bytesToLong(handle);
return true;
}
private static String hex(byte b) {
StringBuilder strBuilder = new StringBuilder();
strBuilder.append(HEXES.charAt((b & 0xF0) >> 4)).append(
HEXES.charAt((b & 0x0F)));
return strBuilder.toString();
}
public long getFileId() {
return fileId;
}
public byte[] getContent() {
return handle.clone();
}
@Override
public String toString() {
StringBuilder s = new StringBuilder();
for (int i = 0; i < handle.length; i++) {
s.append(hex(handle[i]));
}
return s.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof FileHandle)) {
return false;
}
FileHandle h = (FileHandle) o;
return Arrays.equals(handle, h.handle);
}
@Override
public int hashCode() {
return Arrays.hashCode(handle);
}
}
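// Illustrative usage sketch (not part of the original class): builds handles
// both from a numeric file id and from a path string, relying only on the
// methods defined above. The id 16386L and the path are example values.
class FileHandleExample {
  public static void main(String[] args) {
    FileHandle byId = new FileHandle(16386L);
    System.out.println("fileId=" + byId.getFileId()); // prints 16386
    System.out.println("hex=" + byId);                // 64 hex chars, id in the first 8 bytes
    FileHandle a = new FileHandle("/export/dir");
    FileHandle b = new FileHandle("/export/dir");
    System.out.println("equal=" + a.equals(b));       // true: same MD5 digest
  }
}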
| 4,174 | 25.592357 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Status.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3;
/**
* Success or error status is reported in NFS3 responses.
*/
public class Nfs3Status {
/** Indicates the call completed successfully. */
public final static int NFS3_OK = 0;
/**
* The operation was not allowed because the caller is either not a
* privileged user (root) or not the owner of the target of the operation.
*/
public final static int NFS3ERR_PERM = 1;
/**
* No such file or directory. The file or directory name specified does not
* exist.
*/
public final static int NFS3ERR_NOENT = 2;
/**
* I/O error. A hard error (for example, a disk error) occurred while
* processing the requested operation.
*/
public final static int NFS3ERR_IO = 5;
/** I/O error. No such device or address. */
public final static int NFS3ERR_NXIO = 6;
/**
* Permission denied. The caller does not have the correct permission to
* perform the requested operation. Contrast this with NFS3ERR_PERM, which
* restricts itself to owner or privileged user permission failures.
*/
public final static int NFS3ERR_ACCES = 13;
/** File exists. The file specified already exists. */
public final static int NFS3ERR_EXIST = 17;
/** Attempt to do a cross-device hard link. */
public final static int NFS3ERR_XDEV = 18;
/** No such device. */
public final static int NFS3ERR_NODEV = 19;
/** The caller specified a non-directory in a directory operation. */
  public final static int NFS3ERR_NOTDIR = 20;
/** The caller specified a directory in a non-directory operation. */
public final static int NFS3ERR_ISDIR = 21;
/**
* Invalid argument or unsupported argument for an operation. Two examples are
* attempting a READLINK on an object other than a symbolic link or attempting
* to SETATTR a time field on a server that does not support this operation.
*/
public final static int NFS3ERR_INVAL = 22;
/**
* File too large. The operation would have caused a file to grow beyond the
* server's limit.
*/
public final static int NFS3ERR_FBIG = 27;
/**
* No space left on device. The operation would have caused the server's file
* system to exceed its limit.
*/
public final static int NFS3ERR_NOSPC = 28;
/**
* Read-only file system. A modifying operation was attempted on a read-only
* file system.
*/
public final static int NFS3ERR_ROFS = 30;
/** Too many hard links. */
public final static int NFS3ERR_MLINK = 31;
/** The filename in an operation was too long. */
public final static int NFS3ERR_NAMETOOLONG = 63;
/** An attempt was made to remove a directory that was not empty. */
public final static int NFS3ERR_NOTEMPTY = 66;
/**
* Resource (quota) hard limit exceeded. The user's resource limit on the
* server has been exceeded.
*/
public final static int NFS3ERR_DQUOT = 69;
/**
* The file handle given in the arguments was invalid. The file referred to by
* that file handle no longer exists or access to it has been revoked.
*/
public final static int NFS3ERR_STALE = 70;
/**
* The file handle given in the arguments referred to a file on a non-local
* file system on the server.
*/
public final static int NFS3ERR_REMOTE = 71;
/** The file handle failed internal consistency checks */
public final static int NFS3ERR_BADHANDLE = 10001;
/**
* Update synchronization mismatch was detected during a SETATTR operation.
*/
public final static int NFS3ERR_NOT_SYNC = 10002;
/** READDIR or READDIRPLUS cookie is stale */
public final static int NFS3ERR_BAD_COOKIE = 10003;
/** Operation is not supported */
public final static int NFS3ERR_NOTSUPP = 10004;
/** Buffer or request is too small */
public final static int NFS3ERR_TOOSMALL = 10005;
/**
* An error occurred on the server which does not map to any of the legal NFS
* version 3 protocol error values. The client should translate this into an
* appropriate error. UNIX clients may choose to translate this to EIO.
*/
public final static int NFS3ERR_SERVERFAULT = 10006;
/**
* An attempt was made to create an object of a type not supported by the
* server.
*/
public final static int NFS3ERR_BADTYPE = 10007;
/**
* The server initiated the request, but was not able to complete it in a
* timely fashion. The client should wait and then try the request with a new
* RPC transaction ID. For example, this error should be returned from a
* server that supports hierarchical storage and receives a request to process
* a file that has been migrated. In this case, the server should start the
* immigration process and respond to client with this error.
*/
public final static int NFS3ERR_JUKEBOX = 10008;
}
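// Illustrative helper sketch (not part of the original class): maps a handful
// of the status codes above to short names for log messages. Only a subset is
// covered; unknown codes fall back to the numeric value.
class Nfs3StatusNames {
  static String name(int status) {
    switch (status) {
    case Nfs3Status.NFS3_OK:         return "NFS3_OK";
    case Nfs3Status.NFS3ERR_NOENT:   return "NFS3ERR_NOENT";
    case Nfs3Status.NFS3ERR_ACCES:   return "NFS3ERR_ACCES";
    case Nfs3Status.NFS3ERR_STALE:   return "NFS3ERR_STALE";
    case Nfs3Status.NFS3ERR_JUKEBOX: return "NFS3ERR_JUKEBOX";
    default:                         return "status " + status;
    }
  }
  public static void main(String[] args) {
    System.out.println(name(Nfs3Status.NFS3ERR_STALE));
  }
}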
| 5,636 | 33.582822 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3;
import org.apache.hadoop.nfs.nfs3.response.NFS3Response;
import org.apache.hadoop.oncrpc.RpcInfo;
import org.apache.hadoop.oncrpc.XDR;
/**
* RPC procedures as defined in RFC 1813.
*/
public interface Nfs3Interface {
/** NULL: Do nothing */
public NFS3Response nullProcedure();
/** GETATTR: Get file attributes */
public NFS3Response getattr(XDR xdr, RpcInfo info);
/** SETATTR: Set file attributes */
public NFS3Response setattr(XDR xdr, RpcInfo info);
/** LOOKUP: Lookup filename */
public NFS3Response lookup(XDR xdr, RpcInfo info);
/** ACCESS: Check access permission */
public NFS3Response access(XDR xdr, RpcInfo info);
/** READLINK: Read from symbolic link */
public NFS3Response readlink(XDR xdr, RpcInfo info);
/** READ: Read from file */
public NFS3Response read(XDR xdr, RpcInfo info);
/** WRITE: Write to file */
public NFS3Response write(XDR xdr, RpcInfo info);
/** CREATE: Create a file */
public NFS3Response create(XDR xdr, RpcInfo info);
/** MKDIR: Create a directory */
public NFS3Response mkdir(XDR xdr, RpcInfo info);
/** SYMLINK: Create a symbolic link */
public NFS3Response symlink(XDR xdr, RpcInfo info);
/** MKNOD: Create a special device */
public NFS3Response mknod(XDR xdr, RpcInfo info);
/** REMOVE: Remove a file */
public NFS3Response remove(XDR xdr, RpcInfo info);
/** RMDIR: Remove a directory */
public NFS3Response rmdir(XDR xdr, RpcInfo info);
/** RENAME: Rename a file or directory */
public NFS3Response rename(XDR xdr, RpcInfo info);
/** LINK: create link to an object */
public NFS3Response link(XDR xdr, RpcInfo info);
  /** READDIR: Read from directory */
public NFS3Response readdir(XDR xdr, RpcInfo info);
/** READDIRPLUS: Extended read from directory */
public NFS3Response readdirplus(XDR xdr, RpcInfo info);
/** FSSTAT: Get dynamic file system information */
public NFS3Response fsstat(XDR xdr, RpcInfo info);
/** FSINFO: Get static file system information */
public NFS3Response fsinfo(XDR xdr, RpcInfo info);
/** PATHCONF: Retrieve POSIX information */
public NFS3Response pathconf(XDR xdr, RpcInfo info);
/** COMMIT: Commit cached data on a server to stable storage */
public NFS3Response commit(XDR xdr, RpcInfo info);
}
| 3,130 | 31.957895 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3;
import org.apache.hadoop.nfs.NfsFileType;
import org.apache.hadoop.nfs.NfsTime;
import org.apache.hadoop.nfs.nfs3.response.WccAttr;
import org.apache.hadoop.oncrpc.XDR;
/**
 * File attributes reported in NFS.
*/
public class Nfs3FileAttributes {
private int type;
private int mode;
private int nlink;
private int uid;
private int gid;
private long size;
private long used;
private Specdata3 rdev;
private long fsid;
private long fileId;
private NfsTime atime;
private NfsTime mtime;
private NfsTime ctime;
/*
* The interpretation of the two words depends on the type of file system
* object. For a block special (NF3BLK) or character special (NF3CHR) file,
* specdata1 and specdata2 are the major and minor device numbers,
* respectively. (This is obviously a UNIX-specific interpretation.) For all
* other file types, these two elements should either be set to 0 or the
* values should be agreed upon by the client and server. If the client and
* server do not agree upon the values, the client should treat these fields
* as if they are set to 0.
*/
public static class Specdata3 {
final int specdata1;
final int specdata2;
public Specdata3() {
specdata1 = 0;
specdata2 = 0;
}
public Specdata3(int specdata1, int specdata2) {
this.specdata1 = specdata1;
this.specdata2 = specdata2;
}
public int getSpecdata1() {
return specdata1;
}
public int getSpecdata2() {
return specdata2;
}
@Override
public String toString() {
return "(Specdata3: specdata1" + specdata1 + ", specdata2:" + specdata2
+ ")";
}
}
public Nfs3FileAttributes() {
this(NfsFileType.NFSREG, 1, (short)0, 0, 0, 0, 0, 0, 0, 0, new Specdata3());
}
public Nfs3FileAttributes(NfsFileType nfsType, int nlink, short mode, int uid,
int gid, long size, long fsid, long fileId, long mtime, long atime, Specdata3 rdev) {
this.type = nfsType.toValue();
this.mode = mode;
this.nlink = nlink;
this.uid = uid;
this.gid = gid;
this.size = size;
this.used = this.size;
this.rdev = new Specdata3();
this.fsid = fsid;
this.fileId = fileId;
this.mtime = new NfsTime(mtime);
this.atime = atime != 0 ? new NfsTime(atime) : this.mtime;
this.ctime = this.mtime;
this.rdev = rdev;
}
public Nfs3FileAttributes(Nfs3FileAttributes other) {
this.type = other.getType();
this.mode = other.getMode();
this.nlink = other.getNlink();
this.uid = other.getUid();
this.gid = other.getGid();
this.size = other.getSize();
this.used = other.getUsed();
this.rdev = new Specdata3();
this.fsid = other.getFsid();
this.fileId = other.getFileId();
this.mtime = new NfsTime(other.getMtime());
this.atime = new NfsTime(other.getAtime());
this.ctime = new NfsTime(other.getCtime());
}
public void serialize(XDR xdr) {
xdr.writeInt(type);
xdr.writeInt(mode);
xdr.writeInt(nlink);
xdr.writeInt(uid);
xdr.writeInt(gid);
xdr.writeLongAsHyper(size);
xdr.writeLongAsHyper(used);
xdr.writeInt(rdev.getSpecdata1());
xdr.writeInt(rdev.getSpecdata2());
xdr.writeLongAsHyper(fsid);
xdr.writeLongAsHyper(fileId);
atime.serialize(xdr);
mtime.serialize(xdr);
ctime.serialize(xdr);
}
public static Nfs3FileAttributes deserialize(XDR xdr) {
Nfs3FileAttributes attr = new Nfs3FileAttributes();
attr.type = xdr.readInt();
attr.mode = xdr.readInt();
attr.nlink = xdr.readInt();
attr.uid = xdr.readInt();
attr.gid = xdr.readInt();
attr.size = xdr.readHyper();
attr.used = xdr.readHyper();
attr.rdev = new Specdata3(xdr.readInt(), xdr.readInt());
attr.fsid = xdr.readHyper();
attr.fileId = xdr.readHyper();
attr.atime = NfsTime.deserialize(xdr);
attr.mtime = NfsTime.deserialize(xdr);
attr.ctime = NfsTime.deserialize(xdr);
return attr;
}
@Override
public String toString() {
return String.format("type:%d, mode:%d, nlink:%d, uid:%d, gid:%d, " +
"size:%d, used:%d, rdev:%s, fsid:%d, fileid:%d, atime:%s, " +
"mtime:%s, ctime:%s",
type, mode, nlink, uid, gid, size, used, rdev, fsid, fileId, atime,
mtime, ctime);
}
public int getNlink() {
return nlink;
}
public long getUsed() {
return used;
}
public long getFsid() {
return fsid;
}
public long getFileId() {
return fileId;
}
public NfsTime getAtime() {
return atime;
}
public NfsTime getMtime() {
return mtime;
}
public NfsTime getCtime() {
return ctime;
}
public int getType() {
return type;
}
public WccAttr getWccAttr() {
return new WccAttr(size, mtime, ctime);
}
public long getSize() {
return size;
}
public void setSize(long size) {
this.size = size;
}
public void setUsed(long used) {
this.used = used;
}
public int getMode() {
return this.mode;
}
public int getUid() {
return this.uid;
}
public int getGid() {
return this.gid;
}
public Specdata3 getRdev() {
return rdev;
}
public void setRdev(Specdata3 rdev) {
this.rdev = rdev;
}
}
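// Illustrative usage sketch (not part of the original class): fills in the
// attributes of a regular file and writes them into an XDR buffer, as a
// GETATTR reply would. All numeric values are arbitrary example inputs.
class Nfs3FileAttributesExample {
  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    Nfs3FileAttributes attrs = new Nfs3FileAttributes(NfsFileType.NFSREG,
        1, (short) 0644, 1000, 1000, 4096L, 0L, 16386L, now, now,
        new Nfs3FileAttributes.Specdata3());
    XDR xdr = new XDR();  // assumes the no-argument XDR constructor
    attrs.serialize(xdr);
    System.out.println(attrs);
  }
}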
| 6,114 | 25.357759 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LINK3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* LINK3 Request
*/
public class LINK3Request extends RequestWithHandle {
private final FileHandle fromDirHandle;
private final String fromName;
public LINK3Request(FileHandle handle, FileHandle fromDirHandle,
String fromName) {
super(handle);
this.fromDirHandle = fromDirHandle;
this.fromName = fromName;
}
public static LINK3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
FileHandle fromDirHandle = readHandle(xdr);
String fromName = xdr.readString();
return new LINK3Request(handle, fromDirHandle, fromName);
}
public FileHandle getFromDirHandle() {
return fromDirHandle;
}
public String getFromName() {
return fromName;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
fromDirHandle.serialize(xdr);
    xdr.writeInt(fromName.getBytes(Charsets.UTF_8).length);
    xdr.writeFixedOpaque(fromName.getBytes(Charsets.UTF_8));
}
}
| 1,973 | 30.333333 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIRPLUS3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* READDIRPLUS3 Request
*/
public class READDIRPLUS3Request extends RequestWithHandle {
private final long cookie;
private final long cookieVerf;
private final int dirCount;
private final int maxCount;
public static READDIRPLUS3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
long cookie = xdr.readHyper();
long cookieVerf = xdr.readHyper();
int dirCount = xdr.readInt();
int maxCount = xdr.readInt();
return new READDIRPLUS3Request(handle, cookie, cookieVerf, dirCount,
maxCount);
}
public READDIRPLUS3Request(FileHandle handle, long cookie, long cookieVerf,
int dirCount, int maxCount) {
super(handle);
this.cookie = cookie;
this.cookieVerf = cookieVerf;
this.dirCount = dirCount;
this.maxCount = maxCount;
}
public long getCookie() {
return this.cookie;
}
public long getCookieVerf() {
return this.cookieVerf;
}
public int getDirCount() {
return dirCount;
}
public int getMaxCount() {
return maxCount;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeLongAsHyper(cookie);
xdr.writeLongAsHyper(cookieVerf);
xdr.writeInt(dirCount);
xdr.writeInt(maxCount);
}
}
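// Illustrative usage sketch (not part of the original class): the first
// READDIRPLUS of a directory passes cookie 0 and cookie verifier 0; follow-up
// requests reuse the cookie and verifier returned in the previous reply. The
// handle id and the dirCount/maxCount limits are arbitrary example values.
class ReaddirplusPagingExample {
  public static void main(String[] args) {
    FileHandle dir = new FileHandle(16385L);
    READDIRPLUS3Request first =
        new READDIRPLUS3Request(dir, 0L, 0L, 8 * 1024, 32 * 1024);
    XDR xdr = new XDR();  // assumes the no-argument XDR constructor
    first.serialize(xdr);
    // A later page would be built as
    // new READDIRPLUS3Request(dir, lastCookie, lastVerf, 8 * 1024, 32 * 1024);
    System.out.println("dirCount=" + first.getDirCount()
        + " maxCount=" + first.getMaxCount());
  }
}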
| 2,238 | 28.077922 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/PATHCONF3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* PATHCONF3 Request
*/
public class PATHCONF3Request extends RequestWithHandle {
public static PATHCONF3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
return new PATHCONF3Request(handle);
}
public PATHCONF3Request(FileHandle handle) {
super(handle);
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
}
}
| 1,369 | 30.860465 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SETATTR3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.NfsTime;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* SETATTR3 Request
*/
public class SETATTR3Request extends RequestWithHandle {
private final SetAttr3 attr;
/* A client may request that the server check that the object is in an
* expected state before performing the SETATTR operation. If guard.check is
* TRUE, the server must compare the value of ctime to the current ctime of
* the object. If the values are different, the server must preserve the
* object attributes and must return a status of NFS3ERR_NOT_SYNC. If check is
* FALSE, the server will not perform this check.
*/
private final boolean check;
private final NfsTime ctime;
public static SETATTR3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
SetAttr3 attr = new SetAttr3();
attr.deserialize(xdr);
boolean check = xdr.readBoolean();
NfsTime ctime;
if (check) {
ctime = NfsTime.deserialize(xdr);
} else {
ctime = null;
}
return new SETATTR3Request(handle, attr, check, ctime);
}
public SETATTR3Request(FileHandle handle, SetAttr3 attr, boolean check,
NfsTime ctime) {
super(handle);
this.attr = attr;
this.check = check;
this.ctime = ctime;
}
public SetAttr3 getAttr() {
return attr;
}
public boolean isCheck() {
return check;
}
public NfsTime getCtime() {
return ctime;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
attr.serialize(xdr);
xdr.writeBoolean(check);
if (check) {
ctime.serialize(xdr);
}
}
}
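// Illustrative usage sketch (not part of the original class): a guarded
// SETATTR sends check=true together with the ctime the client last observed,
// so the server can reject the update with NFS3ERR_NOT_SYNC if the object
// changed in the meantime. The handle id is an arbitrary example value.
class GuardedSetattrExample {
  public static void main(String[] args) {
    FileHandle handle = new FileHandle(16386L);
    SetAttr3 newAttr = new SetAttr3();  // empty attribute set for the sketch
    NfsTime lastSeenCtime = new NfsTime(System.currentTimeMillis());
    SETATTR3Request guarded =
        new SETATTR3Request(handle, newAttr, true, lastSeenCtime);
    XDR xdr = new XDR();  // assumes the no-argument XDR constructor
    guarded.serialize(xdr);
    System.out.println("guarded=" + guarded.isCheck());
  }
}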
| 2,562 | 29.152941 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKNOD3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.nfs.NfsFileType;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes.Specdata3;
import org.apache.hadoop.oncrpc.XDR;
/**
* MKNOD3 Request
*/
public class MKNOD3Request extends RequestWithHandle {
private final String name;
private int type;
private SetAttr3 objAttr = null;
private Specdata3 spec = null;
public MKNOD3Request(FileHandle handle, String name, int type,
SetAttr3 objAttr, Specdata3 spec) {
super(handle);
this.name = name;
this.type = type;
this.objAttr = objAttr;
this.spec = spec;
}
public static MKNOD3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
int type = xdr.readInt();
SetAttr3 objAttr = new SetAttr3();
Specdata3 spec = null;
if (type == NfsFileType.NFSCHR.toValue()
|| type == NfsFileType.NFSBLK.toValue()) {
objAttr.deserialize(xdr);
spec = new Specdata3(xdr.readInt(), xdr.readInt());
} else if (type == NfsFileType.NFSSOCK.toValue()
|| type == NfsFileType.NFSFIFO.toValue()) {
objAttr.deserialize(xdr);
}
return new MKNOD3Request(handle, name, type, objAttr, spec);
}
public String getName() {
return name;
}
public int getType() {
return type;
}
public SetAttr3 getObjAttr() {
return objAttr;
}
public Specdata3 getSpec() {
return spec;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
    xdr.writeInt(name.getBytes(Charsets.UTF_8).length);
    xdr.writeFixedOpaque(name.getBytes(Charsets.UTF_8));
objAttr.serialize(xdr);
if (spec != null) {
xdr.writeInt(spec.getSpecdata1());
xdr.writeInt(spec.getSpecdata2());
}
}
}
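// Illustrative usage sketch (not part of the original class): a character
// device node carries both attributes and a Specdata3 with the major/minor
// numbers, while FIFO and socket nodes carry attributes only. The name and
// device numbers are arbitrary example values.
class MknodExample {
  public static void main(String[] args) {
    FileHandle parentDir = new FileHandle(16385L);
    MKNOD3Request charDev = new MKNOD3Request(parentDir, "ttyS0",
        NfsFileType.NFSCHR.toValue(), new SetAttr3(), new Specdata3(4, 64));
    XDR xdr = new XDR();  // assumes the no-argument XDR constructor
    charDev.serialize(xdr);
    System.out.println("type=" + charDev.getType());
  }
}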
| 2,712 | 28.813187 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/NFS3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
 * Base class for NFS3 requests, with a helper to deserialize a {@link FileHandle}.
*/
public abstract class NFS3Request {
/**
* Deserialize a handle from an XDR object
*/
static FileHandle readHandle(XDR xdr) throws IOException {
FileHandle handle = new FileHandle();
if (!handle.deserialize(xdr)) {
throw new IOException("can't deserialize file handle");
}
return handle;
}
/**
   * Subclasses should implement this; usually the handle is serialized first.
*/
public abstract void serialize(XDR xdr);
}
| 1,503 | 31.695652 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READLINK3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* READLINK3 Request
*/
public class READLINK3Request extends RequestWithHandle {
public static READLINK3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
return new READLINK3Request(handle);
}
public READLINK3Request(FileHandle handle) {
super(handle);
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
}
}
| 1,372 | 30.930233 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/COMMIT3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* COMMIT3 Request
*/
public class COMMIT3Request extends RequestWithHandle {
private final long offset;
private final int count;
public static COMMIT3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
long offset = xdr.readHyper();
int count = xdr.readInt();
return new COMMIT3Request(handle, offset, count);
}
public COMMIT3Request(FileHandle handle, long offset, int count) {
super(handle);
this.offset = offset;
this.count = count;
}
public long getOffset() {
return this.offset;
}
public int getCount() {
return this.count;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeLongAsHyper(offset);
xdr.writeInt(count);
}
}
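// Illustrative usage sketch (not part of the original class): COMMIT flushes a
// byte range of previously UNSTABLE writes; per RFC 1813 a count of 0 asks the
// server to commit everything from the offset to the end of the file. The
// handle id and range are arbitrary example values.
class CommitExample {
  public static void main(String[] args) {
    FileHandle file = new FileHandle(16386L);
    COMMIT3Request range = new COMMIT3Request(file, 0L, 64 * 1024);
    COMMIT3Request toEof = new COMMIT3Request(file, 64 * 1024, 0);
    XDR xdr = new XDR();  // assumes the no-argument XDR constructor
    range.serialize(xdr);
    toEof.serialize(xdr);
    System.out.println("count=" + range.getCount());
  }
}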
| 1,741 | 28.525424 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSINFO3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* FSINFO3 Request
*/
public class FSINFO3Request extends RequestWithHandle {
public static FSINFO3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
return new FSINFO3Request(handle);
}
public FSINFO3Request(FileHandle handle) {
super(handle);
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
}
}
| 1,360 | 31.404762 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* SYMLINK3 Request
*/
public class SYMLINK3Request extends RequestWithHandle {
private final String name; // The name of the link
private final SetAttr3 symAttr;
private final String symData; // It contains the target
public static SYMLINK3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
SetAttr3 symAttr = new SetAttr3();
symAttr.deserialize(xdr);
String symData = xdr.readString();
return new SYMLINK3Request(handle, name, symAttr, symData);
}
public SYMLINK3Request(FileHandle handle, String name, SetAttr3 symAttr,
String symData) {
super(handle);
this.name = name;
this.symAttr = symAttr;
this.symData = symData;
}
public String getName() {
return name;
}
public SetAttr3 getSymAttr() {
return symAttr;
}
public String getSymData() {
return symData;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeInt(name.getBytes(Charsets.UTF_8).length);
xdr.writeFixedOpaque(name.getBytes(Charsets.UTF_8));
symAttr.serialize(xdr);
xdr.writeInt(symData.getBytes(Charsets.UTF_8).length);
xdr.writeFixedOpaque(symData.getBytes(Charsets.UTF_8));
}
}
| 2,276 | 30.625 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
import com.google.common.annotations.VisibleForTesting;
/**
* LOOKUP3 Request
*/
public class LOOKUP3Request extends RequestWithHandle {
private String name;
public LOOKUP3Request(FileHandle handle, String name) {
super(handle);
this.name = name;
}
public static LOOKUP3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
return new LOOKUP3Request(handle, name);
}
public String getName() {
return this.name;
}
public void setName(String name) {
this.name = name;
}
@Override
@VisibleForTesting
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeInt(name.getBytes(Charsets.UTF_8).length);
xdr.writeFixedOpaque(name.getBytes(Charsets.UTF_8));
}
}
| 1,806 | 29.116667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/CREATE3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.oncrpc.XDR;
/**
* CREATE3 Request
*/
public class CREATE3Request extends RequestWithHandle {
private final String name;
private final int mode;
private final SetAttr3 objAttr;
private long verf = 0;
public CREATE3Request(FileHandle handle, String name, int mode,
SetAttr3 objAttr, long verf) {
super(handle);
this.name = name;
this.mode = mode;
this.objAttr = objAttr;
this.verf = verf;
}
public static CREATE3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
int mode = xdr.readInt();
SetAttr3 objAttr = new SetAttr3();
long verf = 0;
if ((mode == Nfs3Constant.CREATE_UNCHECKED)
|| (mode == Nfs3Constant.CREATE_GUARDED)) {
objAttr.deserialize(xdr);
} else if (mode == Nfs3Constant.CREATE_EXCLUSIVE) {
verf = xdr.readHyper();
} else {
throw new IOException("Wrong create mode:" + mode);
}
return new CREATE3Request(handle, name, mode, objAttr, verf);
}
public String getName() {
return name;
}
public int getMode() {
return mode;
}
public SetAttr3 getObjAttr() {
return objAttr;
}
public long getVerf() {
return verf;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
    xdr.writeInt(name.getBytes(Charsets.UTF_8).length);
    xdr.writeFixedOpaque(name.getBytes(Charsets.UTF_8));
xdr.writeInt(mode);
objAttr.serialize(xdr);
}
}
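// Illustrative usage sketch (not part of the original class): an exclusive
// create carries a client-chosen verifier instead of initial attributes, so a
// retransmitted request can be recognized by the server; guarded and unchecked
// creates carry attributes. The names and verifier are arbitrary example values.
class ExclusiveCreateExample {
  public static void main(String[] args) {
    FileHandle parentDir = new FileHandle(16385L);
    CREATE3Request exclusive = new CREATE3Request(parentDir, "data.tmp",
        Nfs3Constant.CREATE_EXCLUSIVE, new SetAttr3(), 0x1234abcdL);
    CREATE3Request guarded = new CREATE3Request(parentDir, "data.out",
        Nfs3Constant.CREATE_GUARDED, new SetAttr3(), 0L);
    System.out.println("mode=" + exclusive.getMode()
        + " verf=" + exclusive.getVerf());
    XDR xdr = new XDR();  // assumes the no-argument XDR constructor
    guarded.serialize(xdr);
  }
}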
| 2,519 | 27.965517 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/GETATTR3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* GETATTR3 Request
*/
public class GETATTR3Request extends RequestWithHandle {
public static GETATTR3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
return new GETATTR3Request(handle);
}
public GETATTR3Request(FileHandle handle) {
super(handle);
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
}
}
| 1,365 | 31.52381 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RequestWithHandle.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import org.apache.hadoop.nfs.nfs3.FileHandle;
/**
* An NFS request that uses {@link FileHandle} to identify a file.
*/
public abstract class RequestWithHandle extends NFS3Request {
protected final FileHandle handle;
RequestWithHandle(FileHandle handle) {
this.handle = handle;
}
public FileHandle getHandle() {
return this.handle;
}
}
| 1,212 | 32.694444 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.util.EnumSet;
import org.apache.hadoop.nfs.NfsTime;
import org.apache.hadoop.oncrpc.XDR;
/**
* SetAttr3 contains the file attributes that can be set from the client. The
* fields are the same as the similarly named fields in the NFS3Attributes
* structure.
*/
public class SetAttr3 {
// Options for time stamp change
public static final int TIME_DONT_CHANGE = 0;
public static final int TIME_SET_TO_SERVER_TIME = 1;
public static final int TIME_SET_TO_CLIENT_TIME = 2;
private int mode;
private int uid;
private int gid;
private long size;
private NfsTime atime;
private NfsTime mtime;
private EnumSet<SetAttrField> updateFields;
public static enum SetAttrField {
MODE, UID, GID, SIZE, ATIME, MTIME
};
public SetAttr3() {
mode = 0;
uid = 0;
gid = 0;
size = 0;
updateFields = EnumSet.noneOf(SetAttrField.class);
}
  public SetAttr3(int mode, int uid, int gid, long size, NfsTime atime,
      NfsTime mtime, EnumSet<SetAttrField> updateFields) {
    this.mode = mode;
    this.uid = uid;
    this.gid = gid;
    this.size = size;
    this.atime = atime;
    this.mtime = mtime;
    this.updateFields = updateFields;
  }
public int getMode() {
return mode;
}
public int getUid() {
return uid;
}
public int getGid() {
return gid;
}
public void setGid(int gid) {
this.gid = gid;
}
public long getSize() {
return size;
}
public NfsTime getAtime() {
return atime;
}
public NfsTime getMtime() {
return mtime;
}
public EnumSet<SetAttrField> getUpdateFields() {
return updateFields;
}
public void setUpdateFields(EnumSet<SetAttrField> updateFields) {
this.updateFields = updateFields;
}
public void serialize(XDR xdr) {
if (!updateFields.contains(SetAttrField.MODE)) {
xdr.writeBoolean(false);
} else {
xdr.writeBoolean(true);
xdr.writeInt(mode);
}
if (!updateFields.contains(SetAttrField.UID)) {
xdr.writeBoolean(false);
} else {
xdr.writeBoolean(true);
xdr.writeInt(uid);
}
if (!updateFields.contains(SetAttrField.GID)) {
xdr.writeBoolean(false);
} else {
xdr.writeBoolean(true);
xdr.writeInt(gid);
}
if (!updateFields.contains(SetAttrField.SIZE)) {
xdr.writeBoolean(false);
} else {
xdr.writeBoolean(true);
xdr.writeLongAsHyper(size);
}
if (!updateFields.contains(SetAttrField.ATIME)) {
xdr.writeBoolean(false);
} else {
xdr.writeBoolean(true);
atime.serialize(xdr);
}
if (!updateFields.contains(SetAttrField.MTIME)) {
xdr.writeBoolean(false);
} else {
xdr.writeBoolean(true);
mtime.serialize(xdr);
}
}
public void deserialize(XDR xdr) {
if (xdr.readBoolean()) {
mode = xdr.readInt();
updateFields.add(SetAttrField.MODE);
}
if (xdr.readBoolean()) {
uid = xdr.readInt();
updateFields.add(SetAttrField.UID);
}
if (xdr.readBoolean()) {
gid = xdr.readInt();
updateFields.add(SetAttrField.GID);
}
if (xdr.readBoolean()) {
size = xdr.readHyper();
updateFields.add(SetAttrField.SIZE);
}
int timeSetHow = xdr.readInt();
if (timeSetHow == TIME_SET_TO_CLIENT_TIME) {
atime = NfsTime.deserialize(xdr);
updateFields.add(SetAttrField.ATIME);
} else if (timeSetHow == TIME_SET_TO_SERVER_TIME) {
atime = new NfsTime(System.currentTimeMillis());
updateFields.add(SetAttrField.ATIME);
}
timeSetHow = xdr.readInt();
if (timeSetHow == TIME_SET_TO_CLIENT_TIME) {
mtime = NfsTime.deserialize(xdr);
updateFields.add(SetAttrField.MTIME);
} else if (timeSetHow == TIME_SET_TO_SERVER_TIME) {
mtime = new NfsTime(System.currentTimeMillis());
updateFields.add(SetAttrField.MTIME);
}
}
}
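// Illustrative usage sketch (not part of the original class): only the fields
// named in updateFields are flagged as "set" on the wire; everything else is
// written as "don't change". Mode 0644 and size 0 are example values that
// express a chmod plus truncate in a single SETATTR.
class SetAttr3Example {
  public static void main(String[] args) {
    SetAttr3 chmodAndTruncate = new SetAttr3(0644, 0, 0, 0L, null, null,
        EnumSet.of(SetAttr3.SetAttrField.MODE, SetAttr3.SetAttrField.SIZE));
    XDR xdr = new XDR();  // assumes the no-argument XDR constructor
    chmodAndTruncate.serialize(xdr);
    System.out.println("fields=" + chmodAndTruncate.getUpdateFields());
  }
}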
| 4,675 | 25.269663 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RENAME3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* RENAME3 Request
*/
public class RENAME3Request extends NFS3Request {
private final FileHandle fromDirHandle;
private final String fromName;
private final FileHandle toDirHandle;
private final String toName;
public static RENAME3Request deserialize(XDR xdr) throws IOException {
FileHandle fromDirHandle = readHandle(xdr);
String fromName = xdr.readString();
FileHandle toDirHandle = readHandle(xdr);
String toName = xdr.readString();
return new RENAME3Request(fromDirHandle, fromName, toDirHandle, toName);
}
public RENAME3Request(FileHandle fromDirHandle, String fromName,
FileHandle toDirHandle, String toName) {
this.fromDirHandle = fromDirHandle;
this.fromName = fromName;
this.toDirHandle = toDirHandle;
this.toName = toName;
}
public FileHandle getFromDirHandle() {
return fromDirHandle;
}
public String getFromName() {
return fromName;
}
public FileHandle getToDirHandle() {
return toDirHandle;
}
public String getToName() {
return toName;
}
@Override
public void serialize(XDR xdr) {
fromDirHandle.serialize(xdr);
xdr.writeInt(fromName.getBytes(Charsets.UTF_8).length);
xdr.writeFixedOpaque(fromName.getBytes(Charsets.UTF_8));
toDirHandle.serialize(xdr);
xdr.writeInt(toName.getBytes(Charsets.UTF_8).length);
xdr.writeFixedOpaque(toName.getBytes(Charsets.UTF_8));
}
}
| 2,420 | 30.855263 | 76 |
java
|
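RENAME3Request above, like the REMOVE3, MKDIR3 and RMDIR3 requests that follow, encodes a file name as an int byte length followed by the raw UTF-8 bytes written as fixed opaque data. The helper below is an illustrative sketch, not repository code; it mirrors that convention and assumes XDR's no-argument constructor.

// Illustrative sketch only -- not part of the repository.
import org.apache.commons.io.Charsets;
import org.apache.hadoop.oncrpc.XDR;

public class XdrNameEncodingSketch {
  // Writes a name the way the request classes above do: length, then bytes.
  static void writeName(XDR xdr, String name) {
    byte[] bytes = name.getBytes(Charsets.UTF_8);
    xdr.writeInt(bytes.length);
    xdr.writeFixedOpaque(bytes);
  }

  public static void main(String[] args) {
    XDR xdr = new XDR();                 // assumed no-argument constructor
    writeName(xdr, "old-name.txt");      // e.g. the "from" name of a RENAME
    writeName(xdr, "new-name.txt");      // e.g. the "to" name of a RENAME
  }
}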
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/REMOVE3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* REMOVE3 Request
*/
public class REMOVE3Request extends RequestWithHandle {
private final String name;
public static REMOVE3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
return new REMOVE3Request(handle, name);
}
public REMOVE3Request(FileHandle handle, String name) {
super(handle);
this.name = name;
}
public String getName() {
return this.name;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeInt(name.getBytes(Charsets.UTF_8).length);
xdr.writeFixedOpaque(name.getBytes(Charsets.UTF_8));
}
}
| 1,670 | 30.528302 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKDIR3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* MKDIR3 Request
*/
public class MKDIR3Request extends RequestWithHandle {
private final String name;
private final SetAttr3 objAttr;
public static MKDIR3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
SetAttr3 objAttr = new SetAttr3();
objAttr.deserialize(xdr);
return new MKDIR3Request(handle, name, objAttr);
}
public MKDIR3Request(FileHandle handle, String name, SetAttr3 objAttr) {
super(handle);
this.name = name;
this.objAttr = objAttr;
}
public String getName() {
return name;
}
public SetAttr3 getObjAttr() {
return objAttr;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeInt(name.getBytes(Charsets.UTF_8).length);
xdr.writeFixedOpaque(name.getBytes(Charsets.UTF_8));
objAttr.serialize(xdr);
}
}
| 1,902 | 29.693548 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RMDIR3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* RMDIR3 Request
*/
public class RMDIR3Request extends RequestWithHandle {
private final String name;
public static RMDIR3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
String name = xdr.readString();
return new RMDIR3Request(handle, name);
}
public RMDIR3Request(FileHandle handle, String name) {
super(handle);
this.name = name;
}
public String getName() {
return this.name;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeInt(name.getBytes(Charsets.UTF_8).length);
xdr.writeFixedOpaque(name.getBytes(Charsets.UTF_8));
}
}
| 1,665 | 30.433962 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.oncrpc.XDR;
/**
* WRITE3 Request
*/
public class WRITE3Request extends RequestWithHandle {
private long offset;
private int count;
private final WriteStableHow stableHow;
private final ByteBuffer data;
public static WRITE3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
long offset = xdr.readHyper();
int count = xdr.readInt();
WriteStableHow stableHow = WriteStableHow.fromValue(xdr.readInt());
ByteBuffer data = ByteBuffer.wrap(xdr.readFixedOpaque(xdr.readInt()));
return new WRITE3Request(handle, offset, count, stableHow, data);
}
public WRITE3Request(FileHandle handle, final long offset, final int count,
final WriteStableHow stableHow, final ByteBuffer data) {
super(handle);
this.offset = offset;
this.count = count;
this.stableHow = stableHow;
this.data = data;
}
public long getOffset() {
return this.offset;
}
public void setOffset(long offset) {
this.offset = offset;
}
public int getCount() {
return this.count;
}
public void setCount(int count) {
this.count = count;
}
public WriteStableHow getStableHow() {
return this.stableHow;
}
public ByteBuffer getData() {
return this.data;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeLongAsHyper(offset);
xdr.writeInt(count);
xdr.writeInt(stableHow.getValue());
xdr.writeInt(count);
xdr.writeFixedOpaque(data.array(), count);
}
@Override
public String toString() {
return String.format("fileId: %d offset: %d count: %d stableHow: %s",
handle.getFileId(), offset, count, stableHow.name());
}
}
| 2,743 | 28.505376 | 77 |
java
|
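WRITE3Request above carries the offset, count, stability level and payload of an NFSv3 write. The sketch below is illustrative only; it builds a write of a small payload and serializes it, assuming the FileHandle(long) and XDR() constructors and the UNSTABLE constant of WriteStableHow behave as in the surrounding Hadoop NFS code, with a hypothetical file id of 1.

// Illustrative sketch only -- not part of the repository.
import java.nio.ByteBuffer;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
import org.apache.hadoop.oncrpc.XDR;

public class Write3RequestSketch {
  public static void main(String[] args) {
    byte[] payload = "hello".getBytes();
    WRITE3Request write = new WRITE3Request(
        new FileHandle(1L),             // hypothetical file id
        0L,                             // offset
        payload.length,                 // count
        WriteStableHow.UNSTABLE,        // let the server buffer the data
        ByteBuffer.wrap(payload));
    XDR xdr = new XDR();                // assumed no-argument constructor
    write.serialize(xdr);               // handle, offset, count, stable_how, data
  }
}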
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
import com.google.common.annotations.VisibleForTesting;
/**
* READ3 Request
*/
public class READ3Request extends RequestWithHandle {
private final long offset;
private final int count;
public static READ3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
long offset = xdr.readHyper();
int count = xdr.readInt();
return new READ3Request(handle, offset, count);
}
@VisibleForTesting
public READ3Request(FileHandle handle, long offset, int count) {
super(handle);
this.offset = offset;
this.count = count;
}
public long getOffset() {
return this.offset;
}
public int getCount() {
return this.count;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeLongAsHyper(offset);
xdr.writeInt(count);
}
}
| 1,810 | 28.209677 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/ACCESS3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* ACCESS3 Request
*/
public class ACCESS3Request extends RequestWithHandle {
public static ACCESS3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
return new ACCESS3Request(handle);
}
public ACCESS3Request(FileHandle handle) {
super(handle);
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
}
}
| 1,361 | 30.674419 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIR3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* READDIR3 Request
*/
public class READDIR3Request extends RequestWithHandle {
private final long cookie;
private final long cookieVerf;
private final int count;
public static READDIR3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
long cookie = xdr.readHyper();
long cookieVerf = xdr.readHyper();
int count = xdr.readInt();
return new READDIR3Request(handle, cookie, cookieVerf, count);
}
public READDIR3Request(FileHandle handle, long cookie, long cookieVerf,
int count) {
super(handle);
this.cookie = cookie;
this.cookieVerf = cookieVerf;
this.count = count;
}
public long getCookie() {
return this.cookie;
}
public long getCookieVerf() {
return this.cookieVerf;
}
public long getCount() {
return this.count;
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
xdr.writeLongAsHyper(cookie);
xdr.writeLongAsHyper(cookieVerf);
xdr.writeInt(count);
}
}
| 1,990 | 28.279412 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSSTAT3Request.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.request;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.oncrpc.XDR;
/**
* FSSTAT3 Request
*/
public class FSSTAT3Request extends RequestWithHandle {
public static FSSTAT3Request deserialize(XDR xdr) throws IOException {
FileHandle handle = readHandle(xdr);
return new FSSTAT3Request(handle);
}
public FSSTAT3Request(FileHandle handle) {
super(handle);
}
@Override
public void serialize(XDR xdr) {
handle.serialize(xdr);
}
}
| 1,360 | 31.404762 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.READDIR3Response.DirList3;
import org.apache.hadoop.nfs.nfs3.response.READDIR3Response.Entry3;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
import com.google.common.annotations.VisibleForTesting;
/**
* READDIRPLUS3 Response
*/
public class READDIRPLUS3Response extends NFS3Response {
private Nfs3FileAttributes postOpDirAttr;
private final long cookieVerf;
private final DirListPlus3 dirListPlus;
public static class EntryPlus3 {
private final long fileId;
private final String name;
private final long cookie;
private final Nfs3FileAttributes nameAttr;
private final FileHandle objFileHandle;
public EntryPlus3(long fileId, String name, long cookie,
Nfs3FileAttributes nameAttr, FileHandle objFileHandle) {
this.fileId = fileId;
this.name = name;
this.cookie = cookie;
this.nameAttr = nameAttr;
this.objFileHandle = objFileHandle;
}
@VisibleForTesting
public String getName() {
return name;
}
    static EntryPlus3 deserialize(XDR xdr) {
long fileId = xdr.readHyper();
String name = xdr.readString();
long cookie = xdr.readHyper();
xdr.readBoolean();
Nfs3FileAttributes nameAttr = Nfs3FileAttributes.deserialize(xdr);
FileHandle objFileHandle = new FileHandle();
objFileHandle.deserialize(xdr);
return new EntryPlus3(fileId, name, cookie, nameAttr, objFileHandle);
}
    void serialize(XDR xdr) {
xdr.writeLongAsHyper(fileId);
xdr.writeString(name);
xdr.writeLongAsHyper(cookie);
xdr.writeBoolean(true);
nameAttr.serialize(xdr);
xdr.writeBoolean(true);
objFileHandle.serialize(xdr);
}
}
public static class DirListPlus3 {
List<EntryPlus3> entries;
boolean eof;
public DirListPlus3(EntryPlus3[] entries, boolean eof) {
this.entries = Collections.unmodifiableList(Arrays.asList(entries));
this.eof = eof;
}
@VisibleForTesting
public List<EntryPlus3> getEntries() {
return entries;
}
boolean getEof() {
return eof;
}
}
@VisibleForTesting
public DirListPlus3 getDirListPlus() {
return dirListPlus;
}
public READDIRPLUS3Response(int status) {
this(status, null, 0, null);
}
public READDIRPLUS3Response(int status, Nfs3FileAttributes postOpDirAttr,
final long cookieVerf, final DirListPlus3 dirListPlus) {
super(status);
this.postOpDirAttr = postOpDirAttr;
this.cookieVerf = cookieVerf;
this.dirListPlus = dirListPlus;
}
public static READDIRPLUS3Response deserialize(XDR xdr) {
int status = xdr.readInt();
xdr.readBoolean();
Nfs3FileAttributes postOpDirAttr = Nfs3FileAttributes.deserialize(xdr);
long cookieVerf = 0;
ArrayList<EntryPlus3> entries = new ArrayList<EntryPlus3>();
DirListPlus3 dirList = null;
if (status == Nfs3Status.NFS3_OK) {
cookieVerf = xdr.readHyper();
while (xdr.readBoolean()) {
        EntryPlus3 e = EntryPlus3.deserialize(xdr);
entries.add(e);
}
boolean eof = xdr.readBoolean();
EntryPlus3[] allEntries = new EntryPlus3[entries.size()];
entries.toArray(allEntries);
dirList = new DirListPlus3(allEntries, eof);
}
return new READDIRPLUS3Response(status, postOpDirAttr, cookieVerf, dirList);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
out.writeBoolean(true); // attributes follow
if (postOpDirAttr == null) {
postOpDirAttr = new Nfs3FileAttributes();
}
postOpDirAttr.serialize(out);
if (getStatus() == Nfs3Status.NFS3_OK) {
out.writeLongAsHyper(cookieVerf);
for (EntryPlus3 f : dirListPlus.getEntries()) {
out.writeBoolean(true); // next
        f.serialize(out);
}
out.writeBoolean(false);
out.writeBoolean(dirListPlus.getEof());
}
return out;
}
}
| 5,137 | 29.951807 | 80 |
java
|
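READDIRPLUS3Response above returns directory entries together with their attributes and file handles. The sketch below is illustrative only: it assembles a reply with a single "." entry and serializes it, assuming the XDR() and FileHandle(long) constructors and the VerifierNone verifier from the surrounding ONC RPC code; the xid value is arbitrary.

// Illustrative sketch only -- not part of the repository.
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response;
import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response.DirListPlus3;
import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response.EntryPlus3;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone;

public class ReaddirPlusResponseSketch {
  public static void main(String[] args) {
    EntryPlus3 dot = new EntryPlus3(
        1L, ".", 1L, new Nfs3FileAttributes(), new FileHandle(1L));
    DirListPlus3 list = new DirListPlus3(new EntryPlus3[] { dot }, true);
    READDIRPLUS3Response reply = new READDIRPLUS3Response(
        Nfs3Status.NFS3_OK, new Nfs3FileAttributes(), 0L, list);
    XDR out = new XDR();                         // assumed constructor
    reply.serialize(out, 1234, new VerifierNone()); // VerifierNone assumed
  }
}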
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/ACCESS3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* ACCESS3 Response
*/
public class ACCESS3Response extends NFS3Response {
/*
* A bit mask of access permissions indicating access rights for the
* authentication credentials provided with the request.
*/
private final int access;
private final Nfs3FileAttributes postOpAttr;
public ACCESS3Response(int status) {
this(status, new Nfs3FileAttributes(), 0);
}
public ACCESS3Response(int status, Nfs3FileAttributes postOpAttr, int access) {
super(status);
this.postOpAttr = postOpAttr;
this.access = access;
}
public static ACCESS3Response deserialize(XDR xdr) {
int status = xdr.readInt();
Nfs3FileAttributes postOpAttr = null;
int access = 0;
if (status == Nfs3Status.NFS3_OK) {
postOpAttr = Nfs3FileAttributes.deserialize(xdr);
access = xdr.readInt();
}
return new ACCESS3Response(status, postOpAttr, access);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
if (this.getStatus() == Nfs3Status.NFS3_OK) {
out.writeBoolean(true);
postOpAttr.serialize(out);
out.writeInt(access);
} else {
out.writeBoolean(false);
}
return out;
}
}
| 2,275 | 31.514286 | 81 |
java
|
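The access field in ACCESS3Response above is a bit mask of permissions granted to the caller. The sketch below is illustrative only and uses local constants with the bit values RFC 1813 defines for the ACCESS procedure; the names are not Hadoop identifiers.

// Illustrative sketch only -- not part of the repository.
public class AccessBitsSketch {
  static final int ACCESS_READ    = 0x0001;  // RFC 1813 ACCESS3 bits
  static final int ACCESS_LOOKUP  = 0x0002;
  static final int ACCESS_MODIFY  = 0x0004;
  static final int ACCESS_EXTEND  = 0x0008;
  static final int ACCESS_DELETE  = 0x0010;
  static final int ACCESS_EXECUTE = 0x0020;

  public static void main(String[] args) {
    int access = ACCESS_READ | ACCESS_LOOKUP;   // as carried in the response
    boolean canRead   = (access & ACCESS_READ) != 0;
    boolean canModify = (access & ACCESS_MODIFY) != 0;
    System.out.println("read=" + canRead + " modify=" + canModify);
  }
}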
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/LINK3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
public class LINK3Response extends NFS3Response {
private final WccData fromDirWcc;
private final WccData linkDirWcc;
public LINK3Response(int status) {
this(status, new WccData(null, null), new WccData(null, null));
}
public LINK3Response(int status, WccData fromDirWcc,
WccData linkDirWcc) {
super(status);
this.fromDirWcc = fromDirWcc;
this.linkDirWcc = linkDirWcc;
}
public WccData getFromDirWcc() {
return fromDirWcc;
}
public WccData getLinkDirWcc() {
return linkDirWcc;
}
public static LINK3Response deserialize(XDR xdr) {
int status = xdr.readInt();
WccData fromDirWcc = WccData.deserialize(xdr);
WccData linkDirWcc = WccData.deserialize(xdr);
return new LINK3Response(status, fromDirWcc, linkDirWcc);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
fromDirWcc.serialize(out);
linkDirWcc.serialize(out);
return out;
}
}
| 2,040 | 30.4 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
import com.google.common.annotations.VisibleForTesting;
/**
* READDIR3 Response
*/
public class READDIR3Response extends NFS3Response {
private final Nfs3FileAttributes postOpDirAttr;
private final long cookieVerf;
private final DirList3 dirList;
public static class Entry3 {
private final long fileId;
private final String name;
private final long cookie;
public Entry3(long fileId, String name, long cookie) {
this.fileId = fileId;
this.name = name;
this.cookie = cookie;
}
long getFileId() {
return fileId;
}
@VisibleForTesting
public String getName() {
return name;
}
long getCookie() {
return cookie;
}
    static Entry3 deserialize(XDR xdr) {
long fileId = xdr.readHyper();
String name = xdr.readString();
long cookie = xdr.readHyper();
return new Entry3(fileId, name, cookie);
}
    void serialize(XDR xdr) {
xdr.writeLongAsHyper(getFileId());
xdr.writeString(getName());
xdr.writeLongAsHyper(getCookie());
}
}
public static class DirList3 {
final List<Entry3> entries;
final boolean eof;
public DirList3(Entry3[] entries, boolean eof) {
this.entries = Collections.unmodifiableList(Arrays.asList(entries));
this.eof = eof;
}
@VisibleForTesting
public List<Entry3> getEntries() {
return this.entries;
}
}
public READDIR3Response(int status) {
this(status, new Nfs3FileAttributes());
}
public READDIR3Response(int status, Nfs3FileAttributes postOpAttr) {
this(status, postOpAttr, 0, null);
}
public READDIR3Response(int status, Nfs3FileAttributes postOpAttr,
final long cookieVerf, final DirList3 dirList) {
super(status);
this.postOpDirAttr = postOpAttr;
this.cookieVerf = cookieVerf;
this.dirList = dirList;
}
public Nfs3FileAttributes getPostOpAttr() {
return postOpDirAttr;
}
public long getCookieVerf() {
return cookieVerf;
}
public DirList3 getDirList() {
return dirList;
}
public static READDIR3Response deserialize(XDR xdr) {
int status = xdr.readInt();
xdr.readBoolean();
Nfs3FileAttributes postOpDirAttr = Nfs3FileAttributes.deserialize(xdr);
long cookieVerf = 0;
ArrayList<Entry3> entries = new ArrayList<Entry3>();
DirList3 dirList = null;
if (status == Nfs3Status.NFS3_OK) {
cookieVerf = xdr.readHyper();
while (xdr.readBoolean()) {
        Entry3 e = Entry3.deserialize(xdr);
entries.add(e);
}
boolean eof = xdr.readBoolean();
Entry3[] allEntries = new Entry3[entries.size()];
entries.toArray(allEntries);
dirList = new DirList3(allEntries, eof);
}
return new READDIR3Response(status, postOpDirAttr, cookieVerf, dirList);
}
@Override
public XDR serialize(XDR xdr, int xid, Verifier verifier) {
super.serialize(xdr, xid, verifier);
xdr.writeBoolean(true); // Attributes follow
postOpDirAttr.serialize(xdr);
if (getStatus() == Nfs3Status.NFS3_OK) {
xdr.writeLongAsHyper(cookieVerf);
for (Entry3 e : dirList.entries) {
xdr.writeBoolean(true); // Value follows
        e.serialize(xdr);
}
xdr.writeBoolean(false);
xdr.writeBoolean(dirList.eof);
}
return xdr;
}
}
| 4,509 | 26.668712 | 76 |
java
|
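READDIR3Response above wraps the listing in a DirList3 of Entry3 records plus an eof flag. The sketch below is illustrative only; it builds a two-entry reply with hypothetical file ids and names and walks the listing through the public accessors shown above.

// Illustrative sketch only -- not part of the repository.
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.READDIR3Response;
import org.apache.hadoop.nfs.nfs3.response.READDIR3Response.DirList3;
import org.apache.hadoop.nfs.nfs3.response.READDIR3Response.Entry3;

public class ReaddirListingSketch {
  public static void main(String[] args) {
    Entry3[] entries = {
        new Entry3(1L, ".", 1L),              // hypothetical ids and cookies
        new Entry3(2L, "notes.txt", 2L)
    };
    READDIR3Response reply = new READDIR3Response(
        Nfs3Status.NFS3_OK, new Nfs3FileAttributes(), 0L,
        new DirList3(entries, true));          // eof = true
    for (Entry3 e : reply.getDirList().getEntries()) {
      System.out.println(e.getName());
    }
  }
}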
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/GETATTR3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* GETATTR3 Response
*/
public class GETATTR3Response extends NFS3Response {
private Nfs3FileAttributes postOpAttr;
public GETATTR3Response(int status) {
this(status, new Nfs3FileAttributes());
}
public GETATTR3Response(int status, Nfs3FileAttributes attrs) {
super(status);
this.postOpAttr = attrs;
}
public void setPostOpAttr(Nfs3FileAttributes postOpAttr) {
this.postOpAttr = postOpAttr;
}
public static GETATTR3Response deserialize(XDR xdr) {
int status = xdr.readInt();
Nfs3FileAttributes attr = (status == Nfs3Status.NFS3_OK) ? Nfs3FileAttributes
.deserialize(xdr) : new Nfs3FileAttributes();
return new GETATTR3Response(status, attr);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
if (getStatus() == Nfs3Status.NFS3_OK) {
postOpAttr.serialize(out);
}
return out;
}
}
| 1,975 | 33.068966 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSINFO3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.NfsTime;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* FSINFO3 Response
*/
public class FSINFO3Response extends NFS3Response {
private final Nfs3FileAttributes postOpAttr;
/*
* The maximum size in bytes of a READ request supported by the server. Any
 * READ with a count greater than rtmax will result in a short read of rtmax
* bytes or less.
*/
private final int rtmax;
/*
* The preferred size of a READ request. This should be the same as rtmax
* unless there is a clear benefit in performance or efficiency.
*/
private final int rtpref;
/* The suggested multiple for the size of a READ request. */
private final int rtmult;
/*
* The maximum size of a WRITE request supported by the server. In general,
* the client is limited by wtmax since there is no guarantee that a server
* can handle a larger write. Any WRITE with a count greater than wtmax will
* result in a short write of at most wtmax bytes.
*/
private final int wtmax;
/*
* The preferred size of a WRITE request. This should be the same as wtmax
* unless there is a clear benefit in performance or efficiency.
*/
private final int wtpref;
/*
* The suggested multiple for the size of a WRITE request.
*/
private final int wtmult;
/* The preferred size of a READDIR request. */
private final int dtpref;
/* The maximum size of a file on the file system. */
private final long maxFileSize;
/*
* The server time granularity. When setting a file time using SETATTR, the
* server guarantees only to preserve times to this accuracy. If this is {0,
* 1}, the server can support nanosecond times, {0, 1000000} denotes
* millisecond precision, and {1, 0} indicates that times are accurate only to
* the nearest second.
*/
private final NfsTime timeDelta;
/*
* A bit mask of file system properties. The following values are defined:
*
* FSF_LINK If this bit is 1 (TRUE), the file system supports hard links.
*
* FSF_SYMLINK If this bit is 1 (TRUE), the file system supports symbolic
* links.
*
* FSF_HOMOGENEOUS If this bit is 1 (TRUE), the information returned by
* PATHCONF is identical for every file and directory in the file system. If
* it is 0 (FALSE), the client should retrieve PATHCONF information for each
* file and directory as required.
*
* FSF_CANSETTIME If this bit is 1 (TRUE), the server will set the times for a
* file via SETATTR if requested (to the accuracy indicated by time_delta). If
* it is 0 (FALSE), the server cannot set times as requested.
*/
private final int properties;
public FSINFO3Response(int status) {
this(status, new Nfs3FileAttributes(), 0, 0, 0, 0, 0, 0, 0, 0, null, 0);
}
public FSINFO3Response(int status, Nfs3FileAttributes postOpAttr, int rtmax,
int rtpref, int rtmult, int wtmax, int wtpref, int wtmult, int dtpref,
long maxFileSize, NfsTime timeDelta, int properties) {
super(status);
this.postOpAttr = postOpAttr;
this.rtmax = rtmax;
this.rtpref = rtpref;
this.rtmult = rtmult;
this.wtmax = wtmax;
this.wtpref = wtpref;
this.wtmult = wtmult;
this.dtpref = dtpref;
this.maxFileSize = maxFileSize;
this.timeDelta = timeDelta;
this.properties = properties;
}
public static FSINFO3Response deserialize(XDR xdr) {
int status = xdr.readInt();
xdr.readBoolean();
Nfs3FileAttributes postOpObjAttr = Nfs3FileAttributes.deserialize(xdr);
int rtmax = 0;
int rtpref = 0;
int rtmult = 0;
int wtmax = 0;
int wtpref = 0;
int wtmult = 0;
int dtpref = 0;
long maxFileSize = 0;
NfsTime timeDelta = null;
int properties = 0;
if (status == Nfs3Status.NFS3_OK) {
rtmax = xdr.readInt();
rtpref = xdr.readInt();
rtmult = xdr.readInt();
wtmax = xdr.readInt();
wtpref = xdr.readInt();
wtmult = xdr.readInt();
dtpref = xdr.readInt();
maxFileSize = xdr.readHyper();
timeDelta = NfsTime.deserialize(xdr);
properties = xdr.readInt();
}
return new FSINFO3Response(status, postOpObjAttr, rtmax, rtpref, rtmult,
wtmax, wtpref, wtmult, dtpref, maxFileSize, timeDelta, properties);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
out.writeBoolean(true);
postOpAttr.serialize(out);
if (getStatus() == Nfs3Status.NFS3_OK) {
out.writeInt(rtmax);
out.writeInt(rtpref);
out.writeInt(rtmult);
out.writeInt(wtmax);
out.writeInt(wtpref);
out.writeInt(wtmult);
out.writeInt(dtpref);
out.writeLongAsHyper(maxFileSize);
timeDelta.serialize(out);
out.writeInt(properties);
}
return out;
}
}
| 5,856 | 34.283133 | 80 |
java
|
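The properties field of FSINFO3Response above is a bit mask whose flags are spelled out in the comment block. The sketch below is illustrative only; it tests those flags using local constants with the values RFC 1813 assigns to FSF_LINK, FSF_SYMLINK, FSF_HOMOGENEOUS and FSF_CANSETTIME, not Hadoop identifiers.

// Illustrative sketch only -- not part of the repository.
public class FsinfoPropertiesSketch {
  static final int FSF_LINK        = 0x0001;  // RFC 1813 FSINFO properties
  static final int FSF_SYMLINK     = 0x0002;
  static final int FSF_HOMOGENEOUS = 0x0008;
  static final int FSF_CANSETTIME  = 0x0010;

  public static void main(String[] args) {
    int properties = FSF_LINK | FSF_SYMLINK | FSF_CANSETTIME;
    if ((properties & FSF_HOMOGENEOUS) == 0) {
      // PATHCONF data may differ per file, so it must be fetched per object.
      System.out.println("PATHCONF must be queried per file");
    }
    System.out.println("can set times: "
        + ((properties & FSF_CANSETTIME) != 0));
  }
}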
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/WccData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.oncrpc.XDR;
/**
 * WccData carries the pre- and post-operation attributes that a client uses
 * for weak cache consistency checking.
*/
public class WccData {
private WccAttr preOpAttr;
private Nfs3FileAttributes postOpAttr;
public WccAttr getPreOpAttr() {
return preOpAttr;
}
public void setPreOpAttr(WccAttr preOpAttr) {
this.preOpAttr = preOpAttr;
}
public Nfs3FileAttributes getPostOpAttr() {
return postOpAttr;
}
public void setPostOpAttr(Nfs3FileAttributes postOpAttr) {
this.postOpAttr = postOpAttr;
}
public WccData(WccAttr preOpAttr, Nfs3FileAttributes postOpAttr) {
this.preOpAttr = (preOpAttr == null) ? new WccAttr() : preOpAttr;
this.postOpAttr = (postOpAttr == null) ? new Nfs3FileAttributes()
: postOpAttr;
}
public static WccData deserialize(XDR xdr) {
xdr.readBoolean();
WccAttr preOpAttr = WccAttr.deserialize(xdr);
xdr.readBoolean();
Nfs3FileAttributes postOpAttr = Nfs3FileAttributes.deserialize(xdr);
return new WccData(preOpAttr, postOpAttr);
}
public void serialize(XDR out) {
out.writeBoolean(true); // attributes follow
preOpAttr.serialize(out);
out.writeBoolean(true); // attributes follow
postOpAttr.serialize(out);
}
}
| 2,141 | 31.454545 | 75 |
java
|
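WccData above substitutes empty attribute objects when it is handed nulls, which is how the response classes build their default wcc_data. The sketch below is illustrative only and assumes XDR's no-argument constructor.

// Illustrative sketch only -- not part of the repository.
import org.apache.hadoop.nfs.nfs3.response.WccData;
import org.apache.hadoop.oncrpc.XDR;

public class WccDataSketch {
  public static void main(String[] args) {
    WccData wcc = new WccData(null, null);  // defaults for pre- and post-op
    XDR out = new XDR();                    // assumed no-argument constructor
    wcc.serialize(out);                     // writes both attribute blocks
    System.out.println("pre-op attr present: " + (wcc.getPreOpAttr() != null));
  }
}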
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/CREATE3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* CREATE3 Response
*/
public class CREATE3Response extends NFS3Response {
private final FileHandle objHandle;
private final Nfs3FileAttributes postOpObjAttr;
private WccData dirWcc;
public CREATE3Response(int status) {
this(status, null, null, null);
}
public CREATE3Response(int status, FileHandle handle,
Nfs3FileAttributes postOpObjAttr, WccData dirWcc) {
super(status);
this.objHandle = handle;
this.postOpObjAttr = postOpObjAttr;
this.dirWcc = dirWcc;
}
public FileHandle getObjHandle() {
return objHandle;
}
public Nfs3FileAttributes getPostOpObjAttr() {
return postOpObjAttr;
}
public WccData getDirWcc() {
return dirWcc;
}
public static CREATE3Response deserialize(XDR xdr) {
int status = xdr.readInt();
FileHandle objHandle = new FileHandle();
Nfs3FileAttributes postOpObjAttr = null;
if (status == Nfs3Status.NFS3_OK) {
xdr.readBoolean();
objHandle.deserialize(xdr);
xdr.readBoolean();
postOpObjAttr = Nfs3FileAttributes.deserialize(xdr);
}
WccData dirWcc = WccData.deserialize(xdr);
return new CREATE3Response(status, objHandle, postOpObjAttr, dirWcc);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
if (getStatus() == Nfs3Status.NFS3_OK) {
out.writeBoolean(true); // Handle follows
objHandle.serialize(out);
out.writeBoolean(true); // Attributes follow
postOpObjAttr.serialize(out);
}
if (dirWcc == null) {
dirWcc = new WccData(null, null);
}
dirWcc.serialize(out);
return out;
}
}
| 2,743 | 29.488889 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/REMOVE3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* REMOVE3 Response
*/
public class REMOVE3Response extends NFS3Response {
private WccData dirWcc;
public REMOVE3Response(int status) {
this(status, null);
}
public REMOVE3Response(int status, WccData dirWcc) {
super(status);
this.dirWcc = dirWcc;
}
public static REMOVE3Response deserialize(XDR xdr) {
int status = xdr.readInt();
WccData dirWcc = WccData.deserialize(xdr);
return new REMOVE3Response(status, dirWcc);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
if (dirWcc == null) {
dirWcc = new WccData(null, null);
}
dirWcc.serialize(out);
return out;
}
}
| 1,746 | 30.763636 | 75 |
java
|