Dataset columns:
  repo            : string, 1 to 191 chars (nullable)
  file            : string, 23 to 351 chars
  code            : string, 0 to 5.32M chars
  file_length     : int64, 0 to 5.32M
  avg_line_length : float64, 0 to 2.9k
  max_line_length : int64, 0 to 288k
  extension_type  : string, 1 class

Each row below gives repo, file, and code, followed by a footer line with
file_length | avg_line_length | max_line_length | extension_type.
repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java
code:
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* KMS REST and JSON constants and utility methods for the KMSServer.
*/
@InterfaceAudience.Private
public class KMSRESTConstants {
public static final String SERVICE_VERSION = "/v1";
public static final String KEY_RESOURCE = "key";
public static final String KEYS_RESOURCE = "keys";
public static final String KEYS_METADATA_RESOURCE = KEYS_RESOURCE +
"/metadata";
public static final String KEYS_NAMES_RESOURCE = KEYS_RESOURCE + "/names";
public static final String KEY_VERSION_RESOURCE = "keyversion";
public static final String METADATA_SUB_RESOURCE = "_metadata";
public static final String VERSIONS_SUB_RESOURCE = "_versions";
public static final String EEK_SUB_RESOURCE = "_eek";
public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion";
public static final String KEY = "key";
public static final String EEK_OP = "eek_op";
public static final String EEK_GENERATE = "generate";
public static final String EEK_DECRYPT = "decrypt";
public static final String EEK_NUM_KEYS = "num_keys";
public static final String IV_FIELD = "iv";
public static final String NAME_FIELD = "name";
public static final String CIPHER_FIELD = "cipher";
public static final String LENGTH_FIELD = "length";
public static final String DESCRIPTION_FIELD = "description";
public static final String ATTRIBUTES_FIELD = "attributes";
public static final String CREATED_FIELD = "created";
public static final String VERSIONS_FIELD = "versions";
public static final String MATERIAL_FIELD = "material";
public static final String VERSION_NAME_FIELD = "versionName";
public static final String ENCRYPTED_KEY_VERSION_FIELD =
"encryptedKeyVersion";
public static final String ERROR_EXCEPTION_JSON = "exception";
public static final String ERROR_MESSAGE_JSON = "message";
}
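A quick illustration of how these constants compose into KMS REST paths. This is a hedged sketch, not code from the Hadoop source: the class name KMSPathDemo and the key name "myKey" are assumptions, though Hadoop's own KMS client assembles its URLs from these same constants.

import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;

public class KMSPathDemo {
  public static void main(String[] args) {
    String keyName = "myKey"; // hypothetical key name
    // "/v1/key/myKey/_currentversion"
    System.out.println(KMSRESTConstants.SERVICE_VERSION + "/"
        + KMSRESTConstants.KEY_RESOURCE + "/" + keyName + "/"
        + KMSRESTConstants.CURRENT_VERSION_SUB_RESOURCE);
    // "/v1/keys/names"
    System.out.println(KMSRESTConstants.SERVICE_VERSION + "/"
        + KMSRESTConstants.KEYS_NAMES_RESOURCE);
  }
}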
file_length: 2,748 | avg_line_length: 42.634921 | max_line_length: 78 | extension_type: java

repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsNetgroupMapping.java
code:
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.security.NetgroupCache;
/**
* A simple shell-based implementation of {@link GroupMappingServiceProvider}
 * that exec's the <code>groups</code> shell command to fetch the unix group
 * memberships of a given user, and additionally resolves netgroup
 * memberships via the {@link NetgroupCache}.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class ShellBasedUnixGroupsNetgroupMapping
extends ShellBasedUnixGroupsMapping {
private static final Log LOG =
LogFactory.getLog(ShellBasedUnixGroupsNetgroupMapping.class);
/**
 * Get unix groups (from the parent class) and netgroups for the given user.
*
* @param user get groups and netgroups for this user
* @return groups and netgroups for user
*/
@Override
public List<String> getGroups(String user) throws IOException {
// parent get unix groups
List<String> groups = new LinkedList<String>(super.getGroups(user));
NetgroupCache.getNetgroups(user, groups);
return groups;
}
/**
* Refresh the netgroup cache
*/
@Override
public void cacheGroupsRefresh() throws IOException {
List<String> groups = NetgroupCache.getNetgroupNames();
NetgroupCache.clear();
cacheGroupsAdd(groups);
}
/**
 * Add groups to the cache; only netgroups are cached.
*
* @param groups list of group names to add to cache
*/
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
for(String group: groups) {
if(group.length() == 0) {
// better safe than sorry (should never happen)
} else if(group.charAt(0) == '@') {
if(!NetgroupCache.isCached(group)) {
NetgroupCache.add(group, getUsersForNetgroup(group));
}
} else {
// unix group, not caching
}
}
}
/**
* Gets users for a netgroup
*
* @param netgroup return users for this netgroup
* @return list of users for a given netgroup
*/
protected List<String> getUsersForNetgroup(String netgroup)
throws IOException {
List<String> users = new LinkedList<String>();
// returns a string similar to this:
// group ( , user, ) ( domain, user1, host.com )
String usersRaw = execShellGetUserForNetgroup(netgroup);
// get rid of spaces, makes splitting much easier
usersRaw = usersRaw.replaceAll(" +", "");
// remove netgroup name at the beginning of the string
usersRaw = usersRaw.replaceFirst(
netgroup.replaceFirst("@", "") + "[()]+",
"");
// split string into user infos
String[] userInfos = usersRaw.split("[()]+");
for(String userInfo : userInfos) {
// userInfo: xxx,user,yyy (xxx, yyy can be empty strings)
// get rid of everything before first and after last comma
String user = userInfo.replaceFirst("[^,]*,", "");
user = user.replaceFirst(",.*$", "");
// voila! got username!
users.add(user);
}
return users;
}
/**
 * Calls the shell to get users for a netgroup by invoking
 * <code>getent netgroup</code>; this is a low-level function that simply
 * returns the raw command output as a string.
*
* @param netgroup get users for this netgroup
* @return string of users for a given netgroup in getent netgroups format
*/
protected String execShellGetUserForNetgroup(final String netgroup)
throws IOException {
String result = "";
try {
      // shell command does not expect '@' at the beginning of the group name
result = Shell.execCommand(
Shell.getUsersForNetgroupCommand(netgroup.substring(1)));
} catch (ExitCodeException e) {
      // if we didn't get the group - just return the empty string
LOG.warn("error getting users for netgroup " + netgroup, e);
}
return result;
}
}
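The parsing chain in getUsersForNetgroup above is easiest to follow on a concrete line. A standalone sketch, assuming an input string in the format described by the comments (the netgroup and user names are made up):

public class NetgroupParseDemo {
  public static void main(String[] args) {
    String netgroup = "@mygroup"; // hypothetical netgroup
    String usersRaw = "mygroup ( , alice, ) ( domain, bob, host.com )";
    // "mygroup(,alice,)(domain,bob,host.com)"
    usersRaw = usersRaw.replaceAll(" +", "");
    // strip the leading "mygroup(" -> ",alice,)(domain,bob,host.com)"
    usersRaw = usersRaw.replaceFirst(netgroup.replaceFirst("@", "") + "[()]+", "");
    for (String userInfo : usersRaw.split("[()]+")) {
      String user = userInfo.replaceFirst("[^,]*,", ""); // drop up to first comma
      user = user.replaceFirst(",.*$", "");              // drop from next comma on
      System.out.println(user);                          // prints "alice", then "bob"
    }
  }
}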
file_length: 4,899 | avg_line_length: 32.561644 | max_line_length: 78 | extension_type: java

repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IdMappingServiceProvider.java
code:
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
/**
* An interface for the implementation of <userId, userName> mapping
* and <groupId, groupName> mapping
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface IdMappingServiceProvider {
// Return uid for given user name
public int getUid(String user) throws IOException;
// Return gid for given group name
public int getGid(String group) throws IOException;
// Return user name for given user id uid, if not found, return
// <unknown> passed to this method
public String getUserName(int uid, String unknown);
  // Return group name for given group id gid, if not found, return
// <unknown> passed to this method
public String getGroupName(int gid, String unknown);
// Return uid for given user name.
// When can't map user, return user name's string hashcode
public int getUidAllowingUnknown(String user);
// Return gid for given group name.
// When can't map group, return group name's string hashcode
public int getGidAllowingUnknown(String group);
}
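A minimal sketch of the "AllowingUnknown" contract documented above: when a name cannot be mapped, the name's string hashcode stands in for the id. The class and map contents below are assumptions for demonstration only.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class HashCodeFallbackDemo {
  private static final Map<String, Integer> UIDS = new HashMap<>();
  static { UIDS.put("alice", 1000); } // hypothetical uid table

  static int getUid(String user) throws IOException {
    Integer uid = UIDS.get(user);
    if (uid == null) {
      throw new IOException("No such user: " + user);
    }
    return uid;
  }

  static int getUidAllowingUnknown(String user) {
    try {
      return getUid(user);
    } catch (IOException e) {
      return user.hashCode(); // fallback per the interface contract
    }
  }

  public static void main(String[] args) {
    System.out.println(getUidAllowingUnknown("alice"));  // 1000
    System.out.println(getUidAllowingUnknown("nobody")); // "nobody".hashCode()
  }
}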
file_length: 2,095 | avg_line_length: 35.77193 | max_line_length: 75 | extension_type: java

repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
code:
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Ticker;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Timer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* A user-to-groups mapping service.
*
 * {@link Groups} allows the server to get the various group memberships
 * of a given user via the {@link #getGroups(String)} call, thus ensuring
 * a consistent user-to-groups mapping and protecting against the vagaries
 * of different mappings on servers and clients in a Hadoop cluster.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class Groups {
private static final Log LOG = LogFactory.getLog(Groups.class);
private final GroupMappingServiceProvider impl;
private final LoadingCache<String, List<String>> cache;
private final Map<String, List<String>> staticUserToGroupsMap =
new HashMap<String, List<String>>();
private final long cacheTimeout;
private final long negativeCacheTimeout;
private final long warningDeltaMs;
private final Timer timer;
private Set<String> negativeCache;
public Groups(Configuration conf) {
this(conf, new Timer());
}
public Groups(Configuration conf, final Timer timer) {
impl =
ReflectionUtils.newInstance(
conf.getClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
ShellBasedUnixGroupsMapping.class,
GroupMappingServiceProvider.class),
conf);
cacheTimeout =
conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS,
CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT) * 1000;
negativeCacheTimeout =
conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,
CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT) * 1000;
warningDeltaMs =
conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS,
CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT);
parseStaticMapping(conf);
this.timer = timer;
this.cache = CacheBuilder.newBuilder()
.refreshAfterWrite(cacheTimeout, TimeUnit.MILLISECONDS)
.ticker(new TimerToTickerAdapter(timer))
.expireAfterWrite(10 * cacheTimeout, TimeUnit.MILLISECONDS)
.build(new GroupCacheLoader());
if(negativeCacheTimeout > 0) {
Cache<String, Boolean> tempMap = CacheBuilder.newBuilder()
.expireAfterWrite(negativeCacheTimeout, TimeUnit.MILLISECONDS)
.ticker(new TimerToTickerAdapter(timer))
.build();
negativeCache = Collections.newSetFromMap(tempMap.asMap());
}
if(LOG.isDebugEnabled())
LOG.debug("Group mapping impl=" + impl.getClass().getName() +
"; cacheTimeout=" + cacheTimeout + "; warningDeltaMs=" +
warningDeltaMs);
}
@VisibleForTesting
Set<String> getNegativeCache() {
return negativeCache;
}
/*
* Parse the hadoop.user.group.static.mapping.overrides configuration to
* staticUserToGroupsMap
*/
private void parseStaticMapping(Configuration conf) {
String staticMapping = conf.get(
CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES,
CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT);
Collection<String> mappings = StringUtils.getStringCollection(
staticMapping, ";");
for (String users : mappings) {
Collection<String> userToGroups = StringUtils.getStringCollection(users,
"=");
if (userToGroups.size() < 1 || userToGroups.size() > 2) {
throw new HadoopIllegalArgumentException("Configuration "
+ CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES
+ " is invalid");
}
String[] userToGroupsArray = userToGroups.toArray(new String[userToGroups
.size()]);
String user = userToGroupsArray[0];
List<String> groups = Collections.emptyList();
if (userToGroupsArray.length == 2) {
groups = (List<String>) StringUtils
.getStringCollection(userToGroupsArray[1]);
}
staticUserToGroupsMap.put(user, groups);
}
}
private boolean isNegativeCacheEnabled() {
return negativeCacheTimeout > 0;
}
private IOException noGroupsForUser(String user) {
return new IOException("No groups found for user " + user);
}
/**
* Get the group memberships of a given user.
 * If the user's groups are not cached, this method may block.
* @param user User's name
* @return the group memberships of the user
* @throws IOException if user does not exist
*/
public List<String> getGroups(final String user) throws IOException {
// No need to lookup for groups of static users
List<String> staticMapping = staticUserToGroupsMap.get(user);
if (staticMapping != null) {
return staticMapping;
}
// Check the negative cache first
if (isNegativeCacheEnabled()) {
if (negativeCache.contains(user)) {
throw noGroupsForUser(user);
}
}
try {
return cache.get(user);
} catch (ExecutionException e) {
throw (IOException)e.getCause();
}
}
/**
* Convert millisecond times from hadoop's timer to guava's nanosecond ticker.
*/
private static class TimerToTickerAdapter extends Ticker {
private Timer timer;
public TimerToTickerAdapter(Timer timer) {
this.timer = timer;
}
@Override
public long read() {
final long NANOSECONDS_PER_MS = 1000000;
return timer.monotonicNow() * NANOSECONDS_PER_MS;
}
}
/**
* Deals with loading data into the cache.
*/
private class GroupCacheLoader extends CacheLoader<String, List<String>> {
/**
* This method will block if a cache entry doesn't exist, and
* any subsequent requests for the same user will wait on this
* request to return. If a user already exists in the cache,
* this will be run in the background.
* @param user key of cache
* @return List of groups belonging to user
* @throws IOException to prevent caching negative entries
*/
@Override
public List<String> load(String user) throws Exception {
List<String> groups = fetchGroupList(user);
if (groups.isEmpty()) {
if (isNegativeCacheEnabled()) {
negativeCache.add(user);
}
// We throw here to prevent Cache from retaining an empty group
throw noGroupsForUser(user);
}
return groups;
}
/**
 * Queries impl for groups belonging to the user. This could involve I/O
 * and take a while.
*/
private List<String> fetchGroupList(String user) throws IOException {
long startMs = timer.monotonicNow();
List<String> groupList = impl.getGroups(user);
long endMs = timer.monotonicNow();
      long deltaMs = endMs - startMs;
UserGroupInformation.metrics.addGetGroups(deltaMs);
if (deltaMs > warningDeltaMs) {
LOG.warn("Potential performance problem: getGroups(user=" + user +") " +
"took " + deltaMs + " milliseconds.");
}
return groupList;
}
}
/**
* Refresh all user-to-groups mappings.
*/
public void refresh() {
LOG.info("clearing userToGroupsMap cache");
try {
impl.cacheGroupsRefresh();
} catch (IOException e) {
LOG.warn("Error refreshing groups cache", e);
}
cache.invalidateAll();
if(isNegativeCacheEnabled()) {
negativeCache.clear();
}
}
/**
* Add groups to cache
*
* @param groups list of groups to add to cache
*/
public void cacheGroupsAdd(List<String> groups) {
try {
impl.cacheGroupsAdd(groups);
} catch (IOException e) {
LOG.warn("Error caching groups", e);
}
}
private static Groups GROUPS = null;
/**
* Get the groups being used to map user-to-groups.
* @return the groups being used to map user-to-groups.
*/
public static Groups getUserToGroupsMappingService() {
return getUserToGroupsMappingService(new Configuration());
}
/**
* Get the groups being used to map user-to-groups.
* @param conf
* @return the groups being used to map user-to-groups.
*/
public static synchronized Groups getUserToGroupsMappingService(
Configuration conf) {
if(GROUPS == null) {
if(LOG.isDebugEnabled()) {
LOG.debug(" Creating new Groups object");
}
GROUPS = new Groups(conf);
}
return GROUPS;
}
/**
* Create new groups used to map user-to-groups with loaded configuration.
* @param conf
* @return the groups being used to map user-to-groups.
*/
@Private
public static synchronized Groups
getUserToGroupsMappingServiceWithLoadedConfiguration(
Configuration conf) {
GROUPS = new Groups(conf);
return GROUPS;
}
}
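A hedged usage sketch for the static mapping override parsed by parseStaticMapping above. The user and group names are assumptions; the configuration key is the HADOOP_USER_GROUP_STATIC_OVERRIDES constant used in the code, whose value has the form "user1=group1,group2;user2=".

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.Groups;

public class StaticMappingDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // alice -> [admins, devs]; bob -> [] (mapped, but to no groups)
    conf.set(CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES,
        "alice=admins,devs;bob=");
    Groups groups = new Groups(conf);
    System.out.println(groups.getGroups("alice")); // [admins, devs]
    System.out.println(groups.getGroups("bob"));   // []
  }
}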
file_length: 10,633 | avg_line_length: 32.024845 | max_line_length: 93 | extension_type: java

repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java
code:
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
/**
 * A simple shell-based implementation of {@link IdMappingServiceProvider}
 * that maps ids to user names or group names. The maps are updated every
 * 15 minutes. Only a single instance of this class is expected to be on
 * the server.
*
* The maps are incrementally updated as described below:
* 1. Initialize the maps as empty.
* 2. Incrementally update the maps
* - When ShellBasedIdMapping is requested for user or group name given
* an ID, or for ID given a user or group name, do look up in the map
* first, if it doesn't exist, find the corresponding entry with shell
* command, and insert the entry to the maps.
* - When group ID is requested for a given group name, and if the
* group name is numerical, the full group map is loaded. Because we
* don't have a good way to find the entry for a numerical group name,
* loading the full map helps to get in all entries.
 * 3. Periodically refresh the maps for both user and group, i.e.,
 *    repeat step 1.
 * Note: for testing purposes, step 1 may initialize the maps with the full
 * mapping when using the constructor
 * {@link ShellBasedIdMapping#ShellBasedIdMapping(Configuration, boolean)}.
*/
public class ShellBasedIdMapping implements IdMappingServiceProvider {
private static final Log LOG =
LogFactory.getLog(ShellBasedIdMapping.class);
private final static String OS = System.getProperty("os.name");
/** Shell commands to get users and groups */
static final String GET_ALL_USERS_CMD = "getent passwd | cut -d: -f1,3";
static final String GET_ALL_GROUPS_CMD = "getent group | cut -d: -f1,3";
static final String MAC_GET_ALL_USERS_CMD = "dscl . -list /Users UniqueID";
static final String MAC_GET_ALL_GROUPS_CMD = "dscl . -list /Groups PrimaryGroupID";
private final File staticMappingFile;
private StaticMapping staticMapping = null;
  // Last modification time of the static map file, in milliseconds since
  // the epoch (midnight, January 1, 1970 UTC)
private long lastModificationTimeStaticMap = 0;
private boolean constructFullMapAtInit = false;
// Used for parsing the static mapping file.
private static final Pattern EMPTY_LINE = Pattern.compile("^\\s*$");
private static final Pattern COMMENT_LINE = Pattern.compile("^\\s*#.*$");
private static final Pattern MAPPING_LINE =
Pattern.compile("^(uid|gid)\\s+(\\d+)\\s+(\\d+)\\s*(#.*)?$");
final private long timeout;
// Maps for id to name map. Guarded by this object monitor lock
private BiMap<Integer, String> uidNameMap = HashBiMap.create();
private BiMap<Integer, String> gidNameMap = HashBiMap.create();
private long lastUpdateTime = 0; // Last time maps were updated
/*
* Constructor
* @param conf the configuration
* @param constructFullMapAtInit initialize the maps with full mapping when
* true, otherwise initialize the maps to empty. This parameter is
* intended for testing only, its default is false.
*/
@VisibleForTesting
public ShellBasedIdMapping(Configuration conf,
boolean constructFullMapAtInit) throws IOException {
this.constructFullMapAtInit = constructFullMapAtInit;
long updateTime = conf.getLong(
IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY,
IdMappingConstant.USERGROUPID_UPDATE_MILLIS_DEFAULT);
// Minimal interval is 1 minute
if (updateTime < IdMappingConstant.USERGROUPID_UPDATE_MILLIS_MIN) {
LOG.info("User configured user account update time is less"
+ " than 1 minute. Use 1 minute instead.");
timeout = IdMappingConstant.USERGROUPID_UPDATE_MILLIS_MIN;
} else {
timeout = updateTime;
}
String staticFilePath =
conf.get(IdMappingConstant.STATIC_ID_MAPPING_FILE_KEY,
IdMappingConstant.STATIC_ID_MAPPING_FILE_DEFAULT);
staticMappingFile = new File(staticFilePath);
updateStaticMapping();
updateMaps();
}
/*
* Constructor
* initialize user and group maps to empty
* @param conf the configuration
*/
public ShellBasedIdMapping(Configuration conf) throws IOException {
this(conf, false);
}
@VisibleForTesting
public long getTimeout() {
return timeout;
}
@VisibleForTesting
public BiMap<Integer, String> getUidNameMap() {
return uidNameMap;
}
@VisibleForTesting
public BiMap<Integer, String> getGidNameMap() {
return gidNameMap;
}
@VisibleForTesting
synchronized public void clearNameMaps() {
uidNameMap.clear();
gidNameMap.clear();
lastUpdateTime = Time.monotonicNow();
}
synchronized private boolean isExpired() {
return Time.monotonicNow() - lastUpdateTime > timeout;
}
  // If the maps can't be updated, keep using the old ones
private void checkAndUpdateMaps() {
if (isExpired()) {
LOG.info("Update cache now");
try {
updateMaps();
} catch (IOException e) {
LOG.error("Can't update the maps. Will use the old ones,"
+ " which can potentially cause problem.", e);
}
}
}
private static final String DUPLICATE_NAME_ID_DEBUG_INFO =
"NFS gateway could have problem starting with duplicate name or id on the host system.\n"
+ "This is because HDFS (non-kerberos cluster) uses name as the only way to identify a user or group.\n"
+ "The host system with duplicated user/group name or id might work fine most of the time by itself.\n"
+ "However when NFS gateway talks to HDFS, HDFS accepts only user and group name.\n"
+ "Therefore, same name means the same user or same group. To find the duplicated names/ids, one can do:\n"
+ "<getent passwd | cut -d: -f1,3> and <getent group | cut -d: -f1,3> on Linux systems,\n"
+ "<dscl . -list /Users UniqueID> and <dscl . -list /Groups PrimaryGroupID> on MacOS.";
private static void reportDuplicateEntry(final String header,
final Integer key, final String value,
final Integer ekey, final String evalue) {
LOG.warn("\n" + header + String.format(
"new entry (%d, %s), existing entry: (%d, %s).%n%s%n%s",
key, value, ekey, evalue,
"The new entry is to be ignored for the following reason.",
DUPLICATE_NAME_ID_DEBUG_INFO));
}
/**
* uid and gid are defined as uint32 in linux. Some systems create
* (intended or unintended) <nfsnobody, 4294967294> kind of <name,Id>
* mapping, where 4294967294 is 2**32-2 as unsigned int32. As an example,
* https://bugzilla.redhat.com/show_bug.cgi?id=511876.
* Because user or group id are treated as Integer (signed integer or int32)
* here, the number 4294967294 is out of range. The solution is to convert
* uint32 to int32, so to map the out-of-range ID to the negative side of
* Integer, e.g. 4294967294 maps to -2 and 4294967295 maps to -1.
*/
private static Integer parseId(final String idStr) {
Long longVal = Long.parseLong(idStr);
int intVal = longVal.intValue();
return Integer.valueOf(intVal);
}
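  // Illustrative aside (not in the original source): the uint32-to-int32
  // narrowing described in the javadoc above, in action:
  //   parseId("4294967294") == -2   since (int) 4294967294L == -2
  //   parseId("4294967295") == -1   since (int) 4294967295L == -1
  //   parseId("1000")       == 1000 (in-range ids are unchanged)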
/**
* Get the list of users or groups returned by the specified command,
* and save them in the corresponding map.
* @throws IOException
*/
@VisibleForTesting
public static boolean updateMapInternal(BiMap<Integer, String> map,
String mapName, String command, String regex,
Map<Integer, Integer> staticMapping) throws IOException {
boolean updated = false;
BufferedReader br = null;
try {
Process process = Runtime.getRuntime().exec(
new String[] { "bash", "-c", command });
br = new BufferedReader(
new InputStreamReader(process.getInputStream(),
Charset.defaultCharset()));
String line = null;
while ((line = br.readLine()) != null) {
String[] nameId = line.split(regex);
if ((nameId == null) || (nameId.length != 2)) {
throw new IOException("Can't parse " + mapName + " list entry:" + line);
}
LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]);
// HDFS can't differentiate duplicate names with simple authentication
final Integer key = staticMapping.get(parseId(nameId[1]));
final String value = nameId[0];
if (map.containsKey(key)) {
final String prevValue = map.get(key);
if (value.equals(prevValue)) {
// silently ignore equivalent entries
continue;
}
reportDuplicateEntry(
"Got multiple names associated with the same id: ",
key, value, key, prevValue);
continue;
}
if (map.containsValue(value)) {
final Integer prevKey = map.inverse().get(value);
reportDuplicateEntry(
"Got multiple ids associated with the same name: ",
key, value, prevKey, value);
continue;
}
map.put(key, value);
updated = true;
}
LOG.debug("Updated " + mapName + " map size: " + map.size());
} catch (IOException e) {
LOG.error("Can't update " + mapName + " map");
throw e;
} finally {
if (br != null) {
try {
br.close();
} catch (IOException e1) {
LOG.error("Can't close BufferedReader of command result", e1);
}
}
}
return updated;
}
private boolean checkSupportedPlatform() {
if (!OS.startsWith("Linux") && !OS.startsWith("Mac")) {
LOG.error("Platform is not supported:" + OS
+ ". Can't update user map and group map and"
+ " 'nobody' will be used for any user and group.");
return false;
}
return true;
}
private static boolean isInteger(final String s) {
try {
Integer.parseInt(s);
} catch(NumberFormatException e) {
return false;
}
// only got here if we didn't return false
return true;
}
private synchronized void updateStaticMapping() throws IOException {
final boolean init = (staticMapping == null);
//
// if the static mapping file
// - was modified after last update, load the map again;
// - did not exist but was added since last update, load the map;
// - existed before but deleted since last update, clear the map
//
if (staticMappingFile.exists()) {
// check modification time, reload the file if the last modification
// time changed since prior load.
long lmTime = staticMappingFile.lastModified();
if (lmTime != lastModificationTimeStaticMap) {
        LOG.info((init ? "Using '" : "Reloading '") + staticMappingFile
            + "' for static UID/GID mapping...");
lastModificationTimeStaticMap = lmTime;
staticMapping = parseStaticMap(staticMappingFile);
}
} else {
if (init) {
staticMapping = new StaticMapping(new HashMap<Integer, Integer>(),
new HashMap<Integer, Integer>());
}
if (lastModificationTimeStaticMap != 0 || init) {
// print the following log at initialization or when the static
// mapping file was deleted after prior load
LOG.info("Not doing static UID/GID mapping because '"
+ staticMappingFile + "' does not exist.");
}
lastModificationTimeStaticMap = 0;
staticMapping.clear();
}
}
/*
* Refresh static map, and reset the other maps to empty.
* For testing code, a full map may be re-constructed here when the object
* was created with constructFullMapAtInit being set to true.
*/
synchronized public void updateMaps() throws IOException {
if (!checkSupportedPlatform()) {
return;
}
if (constructFullMapAtInit) {
loadFullMaps();
// set constructFullMapAtInit to false to allow testing code to
// do incremental update to maps after initial construction
constructFullMapAtInit = false;
} else {
updateStaticMapping();
clearNameMaps();
}
}
synchronized private void loadFullUserMap() throws IOException {
BiMap<Integer, String> uMap = HashBiMap.create();
if (OS.startsWith("Mac")) {
updateMapInternal(uMap, "user", MAC_GET_ALL_USERS_CMD, "\\s+",
staticMapping.uidMapping);
} else {
updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
staticMapping.uidMapping);
}
uidNameMap = uMap;
lastUpdateTime = Time.monotonicNow();
}
synchronized private void loadFullGroupMap() throws IOException {
BiMap<Integer, String> gMap = HashBiMap.create();
if (OS.startsWith("Mac")) {
updateMapInternal(gMap, "group", MAC_GET_ALL_GROUPS_CMD, "\\s+",
staticMapping.gidMapping);
} else {
updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
staticMapping.gidMapping);
}
gidNameMap = gMap;
lastUpdateTime = Time.monotonicNow();
}
synchronized private void loadFullMaps() throws IOException {
loadFullUserMap();
loadFullGroupMap();
}
  // Search for the id of the given name and return "<name>:<id>".
  // The returned command is either
  //     getent group <name> | cut -d: -f1,3
  // or
  //     id -u <name> | awk '{print "<name>:"$1 }'
  //
private String getName2IdCmdLinux(final String name, final boolean isGrp) {
String cmd;
if (isGrp) {
cmd = "getent group " + name + " | cut -d: -f1,3";
} else {
cmd = "id -u " + name + " | awk '{print \"" + name + ":\"$1 }'";
}
return cmd;
}
// search for name with given id, return "<name>:<id>"
private String getId2NameCmdLinux(final int id, final boolean isGrp) {
String cmd = "getent ";
cmd += isGrp? "group " : "passwd ";
cmd += String.valueOf(id) + " | cut -d: -f1,3";
return cmd;
}
// "dscl . -read /Users/<name> | grep UniqueID" returns "UniqueId: <id>",
// "dscl . -read /Groups/<name> | grep PrimaryGroupID" returns "PrimaryGoupID: <id>"
// The following method returns a command that uses awk to process the result,
// of these commands, and returns "<name> <id>", to simulate one entry returned by
// MAC_GET_ALL_USERS_CMD or MAC_GET_ALL_GROUPS_CMD.
// Specificially, this method returns:
// id -u <name> | awk '{print "<name>:"$1 }'
// OR
// dscl . -read /Groups/<name> | grep PrimaryGroupID | awk '($1 == "PrimaryGroupID:") { print "<name> " $2 }'
//
private String getName2IdCmdMac(final String name, final boolean isGrp) {
String cmd;
if (isGrp) {
cmd = "dscl . -read /Groups/" + name;
cmd += " | grep PrimaryGroupID | awk '($1 == \"PrimaryGroupID:\") ";
cmd += "{ print \"" + name + " \" $2 }'";
} else {
cmd = "id -u " + name + " | awk '{print \"" + name + " \"$1 }'";
}
return cmd;
}
// "dscl . -search /Users UniqueID <id>" returns
// <name> UniqueID = (
// <id>
// )
// "dscl . -search /Groups PrimaryGroupID <id>" returns
// <name> PrimaryGroupID = (
// <id>
// )
  // The following method returns a command that uses sed to process the
  // result and returns "<name> <id>" to simulate one entry returned
  // by MAC_GET_ALL_USERS_CMD or MAC_GET_ALL_GROUPS_CMD.
  // For certain negative-id cases like nfsnobody, the <id> is quoted as
  // "<id>"; one sed section is added to remove the quotes.
// Specifically, the method returns:
// dscl . -search /Users UniqueID <id> | sed 'N;s/\\n//g;N;s/\\n//g' | sed 's/UniqueID =//g' | sed 's/)//g' | sed 's/\"//g'
// OR
// dscl . -search /Groups PrimaryGroupID <id> | sed 'N;s/\\n//g;N;s/\\n//g' | sed 's/PrimaryGroupID =//g' | sed 's/)//g' | sed 's/\"//g'
//
private String getId2NameCmdMac(final int id, final boolean isGrp) {
String cmd = "dscl . -search /";
cmd += isGrp? "Groups PrimaryGroupID " : "Users UniqueID ";
cmd += String.valueOf(id);
cmd += " | sed 'N;s/\\n//g;N;s/\\n//g' | sed 's/";
cmd += isGrp? "PrimaryGroupID" : "UniqueID";
cmd += " = (//g' | sed 's/)//g' | sed 's/\\\"//g'";
return cmd;
}
synchronized private void updateMapIncr(final String name,
final boolean isGrp) throws IOException {
if (!checkSupportedPlatform()) {
return;
}
if (isInteger(name) && isGrp) {
loadFullGroupMap();
return;
}
boolean updated = false;
updateStaticMapping();
if (OS.startsWith("Linux")) {
if (isGrp) {
updated = updateMapInternal(gidNameMap, "group",
getName2IdCmdLinux(name, true), ":",
staticMapping.gidMapping);
} else {
updated = updateMapInternal(uidNameMap, "user",
getName2IdCmdLinux(name, false), ":",
staticMapping.uidMapping);
}
} else {
// Mac
if (isGrp) {
updated = updateMapInternal(gidNameMap, "group",
getName2IdCmdMac(name, true), "\\s+",
staticMapping.gidMapping);
} else {
updated = updateMapInternal(uidNameMap, "user",
getName2IdCmdMac(name, false), "\\s+",
staticMapping.uidMapping);
}
}
if (updated) {
lastUpdateTime = Time.monotonicNow();
}
}
synchronized private void updateMapIncr(final int id,
final boolean isGrp) throws IOException {
if (!checkSupportedPlatform()) {
return;
}
boolean updated = false;
updateStaticMapping();
if (OS.startsWith("Linux")) {
if (isGrp) {
updated = updateMapInternal(gidNameMap, "group",
getId2NameCmdLinux(id, true), ":",
staticMapping.gidMapping);
} else {
updated = updateMapInternal(uidNameMap, "user",
getId2NameCmdLinux(id, false), ":",
staticMapping.uidMapping);
}
} else {
// Mac
if (isGrp) {
updated = updateMapInternal(gidNameMap, "group",
getId2NameCmdMac(id, true), "\\s+",
staticMapping.gidMapping);
} else {
updated = updateMapInternal(uidNameMap, "user",
getId2NameCmdMac(id, false), "\\s+",
staticMapping.uidMapping);
}
}
if (updated) {
lastUpdateTime = Time.monotonicNow();
}
}
@SuppressWarnings("serial")
static final class PassThroughMap<K> extends HashMap<K, K> {
public PassThroughMap() {
this(new HashMap<K, K>());
}
public PassThroughMap(Map<K, K> mapping) {
super();
for (Map.Entry<K, K> entry : mapping.entrySet()) {
super.put(entry.getKey(), entry.getValue());
}
}
@SuppressWarnings("unchecked")
@Override
public K get(Object key) {
if (super.containsKey(key)) {
return super.get(key);
} else {
return (K) key;
}
}
}
@VisibleForTesting
static final class StaticMapping {
final Map<Integer, Integer> uidMapping;
final Map<Integer, Integer> gidMapping;
public StaticMapping(Map<Integer, Integer> uidMapping,
Map<Integer, Integer> gidMapping) {
this.uidMapping = new PassThroughMap<Integer>(uidMapping);
this.gidMapping = new PassThroughMap<Integer>(gidMapping);
}
public void clear() {
uidMapping.clear();
gidMapping.clear();
}
public boolean isNonEmpty() {
return uidMapping.size() > 0 || gidMapping.size() > 0;
}
}
static StaticMapping parseStaticMap(File staticMapFile)
throws IOException {
Map<Integer, Integer> uidMapping = new HashMap<Integer, Integer>();
Map<Integer, Integer> gidMapping = new HashMap<Integer, Integer>();
BufferedReader in = new BufferedReader(new InputStreamReader(
new FileInputStream(staticMapFile), Charsets.UTF_8));
try {
String line = null;
while ((line = in.readLine()) != null) {
// Skip entirely empty and comment lines.
if (EMPTY_LINE.matcher(line).matches() ||
COMMENT_LINE.matcher(line).matches()) {
continue;
}
Matcher lineMatcher = MAPPING_LINE.matcher(line);
if (!lineMatcher.matches()) {
LOG.warn("Could not parse line '" + line + "'. Lines should be of " +
"the form '[uid|gid] [remote id] [local id]'. Blank lines and " +
"everything following a '#' on a line will be ignored.");
continue;
}
// We know the line is fine to parse without error checking like this
// since it matched the regex above.
String firstComponent = lineMatcher.group(1);
int remoteId = parseId(lineMatcher.group(2));
int localId = parseId(lineMatcher.group(3));
if (firstComponent.equals("uid")) {
uidMapping.put(localId, remoteId);
} else {
gidMapping.put(localId, remoteId);
}
}
} finally {
in.close();
}
return new StaticMapping(uidMapping, gidMapping);
}
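  // Illustrative aside (not in the original source): given a static mapping
  // file containing
  //   # remote-id -> local-id
  //   uid 10 100
  //   gid 11 101  # trailing comments are accepted by MAPPING_LINE
  // parseStaticMap yields uidMapping {100=10} and gidMapping {101=11}.
  // Note the put(localId, remoteId) order above: lookups are keyed by the
  // local id and return the remote id.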
synchronized public int getUid(String user) throws IOException {
checkAndUpdateMaps();
Integer id = uidNameMap.inverse().get(user);
if (id == null) {
updateMapIncr(user, false);
id = uidNameMap.inverse().get(user);
if (id == null) {
throw new IOException("User just deleted?:" + user);
}
}
return id.intValue();
}
synchronized public int getGid(String group) throws IOException {
checkAndUpdateMaps();
Integer id = gidNameMap.inverse().get(group);
if (id == null) {
updateMapIncr(group, true);
id = gidNameMap.inverse().get(group);
if (id == null) {
throw new IOException("No such group:" + group);
}
}
return id.intValue();
}
synchronized public String getUserName(int uid, String unknown) {
checkAndUpdateMaps();
String uname = uidNameMap.get(uid);
if (uname == null) {
try {
updateMapIncr(uid, false);
      } catch (Exception e) {
        // ignore the lookup failure and fall back to the default name below
      }
uname = uidNameMap.get(uid);
if (uname == null) {
LOG.warn("Can't find user name for uid " + uid
+ ". Use default user name " + unknown);
uname = unknown;
}
}
return uname;
}
synchronized public String getGroupName(int gid, String unknown) {
checkAndUpdateMaps();
String gname = gidNameMap.get(gid);
if (gname == null) {
try {
updateMapIncr(gid, true);
      } catch (Exception e) {
        // ignore the lookup failure and fall back to the default name below
      }
gname = gidNameMap.get(gid);
if (gname == null) {
LOG.warn("Can't find group name for gid " + gid
+ ". Use default group name " + unknown);
gname = unknown;
}
}
return gname;
}
// When can't map user, return user name's string hashcode
public int getUidAllowingUnknown(String user) {
checkAndUpdateMaps();
int uid;
try {
uid = getUid(user);
} catch (IOException e) {
uid = user.hashCode();
LOG.info("Can't map user " + user + ". Use its string hashcode:" + uid);
}
return uid;
}
// When can't map group, return group name's string hashcode
public int getGidAllowingUnknown(String group) {
checkAndUpdateMaps();
int gid;
try {
gid = getGid(group);
} catch (IOException e) {
gid = group.hashCode();
LOG.info("Can't map group " + group + ". Use its string hashcode:" + gid);
}
return gid;
}
}
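A hedged usage sketch for the class above, assuming a Linux or Mac host (the user name "alice" is made up). It exercises the incremental-lookup behavior from the class javadoc: the first lookup shells out, and subsequent lookups within the update interval hit the cached BiMaps.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ShellBasedIdMapping;

public class IdMappingDemo {
  public static void main(String[] args) throws Exception {
    ShellBasedIdMapping mapping = new ShellBasedIdMapping(new Configuration());
    // Real uid if "alice" exists on the host, else the string hashcode.
    int uid = mapping.getUidAllowingUnknown("alice");
    // Resolves back to "alice", or falls back to "nobody" if unmapped.
    String name = mapping.getUserName(uid, "nobody");
    System.out.println(uid + " -> " + name);
  }
}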
file_length: 24,758 | avg_line_length: 33.725105 | max_line_length: 138 | extension_type: java

repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AnnotatedSecurityInfo.java
code:
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.TokenInfo;
/**
* Constructs SecurityInfo from Annotations provided in protocol interface.
*/
public class AnnotatedSecurityInfo extends SecurityInfo {
@Override
public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
return protocol.getAnnotation(KerberosInfo.class);
}
@Override
public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
return protocol.getAnnotation(TokenInfo.class);
}
}
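AnnotatedSecurityInfo simply reads annotations off the protocol interface, so a protocol opts in by being annotated. A hedged sketch (the interface name and principal key below are assumptions, not from the Hadoop source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.AnnotatedSecurityInfo;
import org.apache.hadoop.security.KerberosInfo;

public class AnnotatedSecurityInfoDemo {
  // Hypothetical protocol; serverPrincipal names a configuration key whose
  // value is the expected Kerberos principal of the server.
  @KerberosInfo(serverPrincipal = "my.service.kerberos.principal")
  public interface MyProtocol {
    String echo(String message);
  }

  public static void main(String[] args) {
    KerberosInfo info = new AnnotatedSecurityInfo()
        .getKerberosInfo(MyProtocol.class, new Configuration());
    System.out.println(info.serverPrincipal()); // "my.service.kerberos.principal"
  }
}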
file_length: 1,383 | avg_line_length: 32.756098 | max_line_length: 78 | extension_type: java

repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
code:
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.security.Security;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.AuthorizeCallback;
import javax.security.sasl.RealmCallback;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
import javax.security.sasl.SaslServerFactory;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.Server.Connection;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.TokenIdentifier;
/**
 * A utility class for dealing with SASL on the RPC server.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class SaslRpcServer {
public static final Log LOG = LogFactory.getLog(SaslRpcServer.class);
public static final String SASL_DEFAULT_REALM = "default";
private static SaslServerFactory saslFactory;
public static enum QualityOfProtection {
AUTHENTICATION("auth"),
INTEGRITY("auth-int"),
PRIVACY("auth-conf");
public final String saslQop;
private QualityOfProtection(String saslQop) {
this.saslQop = saslQop;
}
public String getSaslQop() {
return saslQop;
}
}
@InterfaceAudience.Private
@InterfaceStability.Unstable
public AuthMethod authMethod;
public String mechanism;
public String protocol;
public String serverId;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public SaslRpcServer(AuthMethod authMethod) throws IOException {
this.authMethod = authMethod;
mechanism = authMethod.getMechanismName();
switch (authMethod) {
case SIMPLE: {
return; // no sasl for simple
}
case TOKEN: {
protocol = "";
serverId = SaslRpcServer.SASL_DEFAULT_REALM;
break;
}
case KERBEROS: {
String fullName = UserGroupInformation.getCurrentUser().getUserName();
if (LOG.isDebugEnabled())
LOG.debug("Kerberos principal name is " + fullName);
// don't use KerberosName because we don't want auth_to_local
String[] parts = fullName.split("[/@]", 3);
protocol = parts[0];
// should verify service host is present here rather than in create()
// but lazy tests are using a UGI that isn't a SPN...
serverId = (parts.length < 2) ? "" : parts[1];
break;
}
default:
// we should never be able to get here
throw new AccessControlException(
"Server does not support SASL " + authMethod);
}
}
@InterfaceAudience.Private
@InterfaceStability.Unstable
public SaslServer create(final Connection connection,
final Map<String,?> saslProperties,
SecretManager<TokenIdentifier> secretManager
) throws IOException, InterruptedException {
UserGroupInformation ugi = null;
final CallbackHandler callback;
switch (authMethod) {
case TOKEN: {
callback = new SaslDigestCallbackHandler(secretManager, connection);
break;
}
case KERBEROS: {
ugi = UserGroupInformation.getCurrentUser();
if (serverId.isEmpty()) {
throw new AccessControlException(
"Kerberos principal name does NOT have the expected "
+ "hostname part: " + ugi.getUserName());
}
callback = new SaslGssCallbackHandler();
break;
}
default:
// we should never be able to get here
throw new AccessControlException(
"Server does not support SASL " + authMethod);
}
final SaslServer saslServer;
if (ugi != null) {
saslServer = ugi.doAs(
new PrivilegedExceptionAction<SaslServer>() {
@Override
public SaslServer run() throws SaslException {
return saslFactory.createSaslServer(mechanism, protocol, serverId,
saslProperties, callback);
}
});
} else {
saslServer = saslFactory.createSaslServer(mechanism, protocol, serverId,
saslProperties, callback);
}
if (saslServer == null) {
throw new AccessControlException(
"Unable to find SASL server implementation for " + mechanism);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Created SASL server with mechanism = " + mechanism);
}
return saslServer;
}
public static void init(Configuration conf) {
Security.addProvider(new SaslPlainServer.SecurityProvider());
// passing null so factory is populated with all possibilities. the
// properties passed when instantiating a server are what really matter
saslFactory = new FastSaslServerFactory(null);
}
static String encodeIdentifier(byte[] identifier) {
return new String(Base64.encodeBase64(identifier), Charsets.UTF_8);
}
static byte[] decodeIdentifier(String identifier) {
return Base64.decodeBase64(identifier.getBytes(Charsets.UTF_8));
}
public static <T extends TokenIdentifier> T getIdentifier(String id,
SecretManager<T> secretManager) throws InvalidToken {
byte[] tokenId = decodeIdentifier(id);
T tokenIdentifier = secretManager.createIdentifier();
try {
tokenIdentifier.readFields(new DataInputStream(new ByteArrayInputStream(
tokenId)));
} catch (IOException e) {
throw (InvalidToken) new InvalidToken(
"Can't de-serialize tokenIdentifier").initCause(e);
}
return tokenIdentifier;
}
static char[] encodePassword(byte[] password) {
return new String(Base64.encodeBase64(password),
Charsets.UTF_8).toCharArray();
}
  /** Splits a fully qualified Kerberos name into parts. */
public static String[] splitKerberosName(String fullName) {
return fullName.split("[/@]");
}
/** Authentication method */
@InterfaceStability.Evolving
public static enum AuthMethod {
SIMPLE((byte) 80, ""),
KERBEROS((byte) 81, "GSSAPI"),
@Deprecated
DIGEST((byte) 82, "DIGEST-MD5"),
TOKEN((byte) 82, "DIGEST-MD5"),
PLAIN((byte) 83, "PLAIN");
/** The code for this method. */
public final byte code;
public final String mechanismName;
private AuthMethod(byte code, String mechanismName) {
this.code = code;
this.mechanismName = mechanismName;
}
private static final int FIRST_CODE = values()[0].code;
/** Return the object represented by the code. */
private static AuthMethod valueOf(byte code) {
final int i = (code & 0xff) - FIRST_CODE;
return i < 0 || i >= values().length ? null : values()[i];
}
/** Return the SASL mechanism name */
public String getMechanismName() {
return mechanismName;
}
/** Read from in */
public static AuthMethod read(DataInput in) throws IOException {
return valueOf(in.readByte());
}
/** Write to out */
public void write(DataOutput out) throws IOException {
out.write(code);
}
};
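  // Illustrative aside (not in the original source): as written, read()
  // decodes positionally via (code - FIRST_CODE), so byte 80 -> SIMPLE,
  // 81 -> KERBEROS, 82 -> DIGEST (declared before TOKEN, which shares
  // byte 82), 83 -> TOKEN (whose declared code is 82), and 84 -> PLAIN.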
/** CallbackHandler for SASL DIGEST-MD5 mechanism */
@InterfaceStability.Evolving
public static class SaslDigestCallbackHandler implements CallbackHandler {
private SecretManager<TokenIdentifier> secretManager;
private Server.Connection connection;
public SaslDigestCallbackHandler(
SecretManager<TokenIdentifier> secretManager,
Server.Connection connection) {
this.secretManager = secretManager;
this.connection = connection;
}
private char[] getPassword(TokenIdentifier tokenid) throws InvalidToken,
StandbyException, RetriableException, IOException {
return encodePassword(secretManager.retriableRetrievePassword(tokenid));
}
@Override
public void handle(Callback[] callbacks) throws InvalidToken,
UnsupportedCallbackException, StandbyException, RetriableException,
IOException {
NameCallback nc = null;
PasswordCallback pc = null;
AuthorizeCallback ac = null;
for (Callback callback : callbacks) {
if (callback instanceof AuthorizeCallback) {
ac = (AuthorizeCallback) callback;
} else if (callback instanceof NameCallback) {
nc = (NameCallback) callback;
} else if (callback instanceof PasswordCallback) {
pc = (PasswordCallback) callback;
} else if (callback instanceof RealmCallback) {
continue; // realm is ignored
} else {
throw new UnsupportedCallbackException(callback,
"Unrecognized SASL DIGEST-MD5 Callback");
}
}
if (pc != null) {
TokenIdentifier tokenIdentifier = getIdentifier(nc.getDefaultName(),
secretManager);
char[] password = getPassword(tokenIdentifier);
UserGroupInformation user = null;
user = tokenIdentifier.getUser(); // may throw exception
connection.attemptingUser = user;
if (LOG.isDebugEnabled()) {
LOG.debug("SASL server DIGEST-MD5 callback: setting password "
+ "for client: " + tokenIdentifier.getUser());
}
pc.setPassword(password);
}
if (ac != null) {
String authid = ac.getAuthenticationID();
String authzid = ac.getAuthorizationID();
if (authid.equals(authzid)) {
ac.setAuthorized(true);
} else {
ac.setAuthorized(false);
}
if (ac.isAuthorized()) {
if (LOG.isDebugEnabled()) {
String username =
getIdentifier(authzid, secretManager).getUser().getUserName();
LOG.debug("SASL server DIGEST-MD5 callback: setting "
+ "canonicalized client ID: " + username);
}
ac.setAuthorizedID(authzid);
}
}
}
}
/** CallbackHandler for SASL GSSAPI Kerberos mechanism */
@InterfaceStability.Evolving
public static class SaslGssCallbackHandler implements CallbackHandler {
@Override
public void handle(Callback[] callbacks) throws
UnsupportedCallbackException {
AuthorizeCallback ac = null;
for (Callback callback : callbacks) {
if (callback instanceof AuthorizeCallback) {
ac = (AuthorizeCallback) callback;
} else {
throw new UnsupportedCallbackException(callback,
"Unrecognized SASL GSSAPI Callback");
}
}
if (ac != null) {
String authid = ac.getAuthenticationID();
String authzid = ac.getAuthorizationID();
if (authid.equals(authzid)) {
ac.setAuthorized(true);
} else {
ac.setAuthorized(false);
}
if (ac.isAuthorized()) {
if (LOG.isDebugEnabled())
LOG.debug("SASL server GSSAPI callback: setting "
+ "canonicalized client ID: " + authzid);
ac.setAuthorizedID(authzid);
}
}
}
}
// Sasl.createSaslServer is 100-200X slower than caching the factories!
private static class FastSaslServerFactory implements SaslServerFactory {
private final Map<String,List<SaslServerFactory>> factoryCache =
new HashMap<String,List<SaslServerFactory>>();
FastSaslServerFactory(Map<String,?> props) {
final Enumeration<SaslServerFactory> factories =
Sasl.getSaslServerFactories();
while (factories.hasMoreElements()) {
SaslServerFactory factory = factories.nextElement();
for (String mech : factory.getMechanismNames(props)) {
if (!factoryCache.containsKey(mech)) {
factoryCache.put(mech, new ArrayList<SaslServerFactory>());
}
factoryCache.get(mech).add(factory);
}
}
}
@Override
public SaslServer createSaslServer(String mechanism, String protocol,
String serverName, Map<String,?> props, CallbackHandler cbh)
throws SaslException {
SaslServer saslServer = null;
List<SaslServerFactory> factories = factoryCache.get(mechanism);
if (factories != null) {
for (SaslServerFactory factory : factories) {
saslServer = factory.createSaslServer(
mechanism, protocol, serverName, props, cbh);
if (saslServer != null) {
break;
}
}
}
return saslServer;
}
@Override
public String[] getMechanismNames(Map<String, ?> props) {
return factoryCache.keySet().toArray(new String[0]);
}
}
}
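A small hedged sketch exercising the public splitKerberosName helper above (the principal strings are made up):

import java.util.Arrays;
import org.apache.hadoop.security.SaslRpcServer;

public class SaslNameDemo {
  public static void main(String[] args) {
    // ["nn", "namenode.example.com", "EXAMPLE.COM"]
    System.out.println(Arrays.toString(
        SaslRpcServer.splitKerberosName("nn/namenode.example.com@EXAMPLE.COM")));
    // A short name has no host or realm parts: ["hdfs"]
    System.out.println(Arrays.toString(
        SaslRpcServer.splitKerberosName("hdfs")));
  }
}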
file_length: 14,317 | avg_line_length: 33.668281 | max_line_length: 78 | extension_type: java

repo: hadoop
file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
code:
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.BufferedInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
/**
* A class that provides the facilities of reading and writing
* secret keys and Tokens.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class Credentials implements Writable {
private static final Log LOG = LogFactory.getLog(Credentials.class);
private Map<Text, byte[]> secretKeysMap = new HashMap<Text, byte[]>();
private Map<Text, Token<? extends TokenIdentifier>> tokenMap =
new HashMap<Text, Token<? extends TokenIdentifier>>();
/**
* Create an empty credentials instance
*/
public Credentials() {
}
/**
* Create a copy of the given credentials
* @param credentials to copy
*/
public Credentials(Credentials credentials) {
this.addAll(credentials);
}
/**
* Returns the Token object for the alias
* @param alias the alias for the Token
* @return token for this alias
*/
public Token<? extends TokenIdentifier> getToken(Text alias) {
return tokenMap.get(alias);
}
/**
* Add a token in the storage (in memory)
* @param alias the alias for the key
* @param t the token object
*/
public void addToken(Text alias, Token<? extends TokenIdentifier> t) {
if (t != null) {
tokenMap.put(alias, t);
} else {
LOG.warn("Null token ignored for " + alias);
}
}
/**
* Return all the tokens in the in-memory map
*/
public Collection<Token<? extends TokenIdentifier>> getAllTokens() {
return tokenMap.values();
}
/**
* @return number of Tokens in the in-memory map
*/
public int numberOfTokens() {
return tokenMap.size();
}
/**
* Returns the key bytes for the alias
* @param alias the alias for the key
* @return key for this alias
*/
public byte[] getSecretKey(Text alias) {
return secretKeysMap.get(alias);
}
/**
* @return number of keys in the in-memory map
*/
public int numberOfSecretKeys() {
return secretKeysMap.size();
}
/**
* Set the key for an alias
* @param alias the alias for the key
* @param key the key bytes
*/
public void addSecretKey(Text alias, byte[] key) {
secretKeysMap.put(alias, key);
}
/**
* Remove the key for a given alias.
* @param alias the alias for the key
*/
public void removeSecretKey(Text alias) {
secretKeysMap.remove(alias);
}
/**
* Return all the secret key entries in the in-memory map
*/
public List<Text> getAllSecretKeys() {
List<Text> list = new java.util.ArrayList<Text>();
list.addAll(secretKeysMap.keySet());
return list;
}
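  // A minimal usage sketch (hypothetical alias and path): populate a
  // Credentials object and round-trip it through a token storage file, e.g.
  //
  //   Credentials creds = new Credentials();
  //   creds.addSecretKey(new Text("my.alias"), secretBytes);
  //   creds.writeTokenStorageFile(new Path("/tmp/creds.bin"), conf);
  //   Credentials restored =
  //       Credentials.readTokenStorageFile(new Path("/tmp/creds.bin"), conf);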
/**
   * Convenience method for reading a token storage file and returning
   * the Credentials (tokens and secret keys) it contains
* @param filename
* @param conf
* @throws IOException
*/
public static Credentials readTokenStorageFile(Path filename, Configuration conf)
throws IOException {
FSDataInputStream in = null;
Credentials credentials = new Credentials();
try {
in = filename.getFileSystem(conf).open(filename);
credentials.readTokenStorageStream(in);
in.close();
return credentials;
} catch(IOException ioe) {
throw new IOException("Exception reading " + filename, ioe);
} finally {
IOUtils.cleanup(LOG, in);
}
}
/**
   * Convenience method for reading a token storage file and returning
   * the Credentials (tokens and secret keys) it contains
* @param filename
* @param conf
* @throws IOException
*/
public static Credentials readTokenStorageFile(File filename, Configuration conf)
throws IOException {
DataInputStream in = null;
Credentials credentials = new Credentials();
try {
in = new DataInputStream(new BufferedInputStream(
new FileInputStream(filename)));
credentials.readTokenStorageStream(in);
return credentials;
} catch(IOException ioe) {
throw new IOException("Exception reading " + filename, ioe);
} finally {
IOUtils.cleanup(LOG, in);
}
}
/**
* Convenience method for reading a token storage file directly from a
* datainputstream
*/
public void readTokenStorageStream(DataInputStream in) throws IOException {
byte[] magic = new byte[TOKEN_STORAGE_MAGIC.length];
in.readFully(magic);
if (!Arrays.equals(magic, TOKEN_STORAGE_MAGIC)) {
throw new IOException("Bad header found in token storage.");
}
byte version = in.readByte();
if (version != TOKEN_STORAGE_VERSION) {
throw new IOException("Unknown version " + version +
" in token storage.");
}
readFields(in);
}
private static final byte[] TOKEN_STORAGE_MAGIC =
"HDTS".getBytes(Charsets.UTF_8);
private static final byte TOKEN_STORAGE_VERSION = 0;
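  // On-disk layout, as implemented by write()/readFields() below: the 4-byte
  // magic "HDTS", a 1-byte version, a vint token count followed by
  // (alias, token) pairs, then a vint secret-key count followed by
  // (alias, vint length, key bytes) entries.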
public void writeTokenStorageToStream(DataOutputStream os)
throws IOException {
os.write(TOKEN_STORAGE_MAGIC);
os.write(TOKEN_STORAGE_VERSION);
write(os);
}
public void writeTokenStorageFile(Path filename,
Configuration conf) throws IOException {
FSDataOutputStream os = filename.getFileSystem(conf).create(filename);
writeTokenStorageToStream(os);
os.close();
}
/**
* Stores all the keys to DataOutput
* @param out
* @throws IOException
*/
@Override
public void write(DataOutput out) throws IOException {
// write out tokens first
WritableUtils.writeVInt(out, tokenMap.size());
for(Map.Entry<Text,
Token<? extends TokenIdentifier>> e: tokenMap.entrySet()) {
e.getKey().write(out);
e.getValue().write(out);
}
// now write out secret keys
WritableUtils.writeVInt(out, secretKeysMap.size());
for(Map.Entry<Text, byte[]> e : secretKeysMap.entrySet()) {
e.getKey().write(out);
WritableUtils.writeVInt(out, e.getValue().length);
out.write(e.getValue());
}
}
/**
* Loads all the keys
* @param in
* @throws IOException
*/
@Override
public void readFields(DataInput in) throws IOException {
secretKeysMap.clear();
tokenMap.clear();
int size = WritableUtils.readVInt(in);
for(int i=0; i<size; i++) {
Text alias = new Text();
alias.readFields(in);
Token<? extends TokenIdentifier> t = new Token<TokenIdentifier>();
t.readFields(in);
tokenMap.put(alias, t);
}
size = WritableUtils.readVInt(in);
for(int i=0; i<size; i++) {
Text alias = new Text();
alias.readFields(in);
int len = WritableUtils.readVInt(in);
byte[] value = new byte[len];
in.readFully(value);
secretKeysMap.put(alias, value);
}
}
/**
* Copy all of the credentials from one credential object into another.
* Existing secrets and tokens are overwritten.
* @param other the credentials to copy
*/
public void addAll(Credentials other) {
addAll(other, true);
}
/**
* Copy all of the credentials from one credential object into another.
* Existing secrets and tokens are not overwritten.
* @param other the credentials to copy
*/
public void mergeAll(Credentials other) {
addAll(other, false);
}
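  // For example: if both objects map the same alias to different secrets,
  // addAll(other) keeps other's value while mergeAll(other) keeps the value
  // already present in this object.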
private void addAll(Credentials other, boolean overwrite) {
for(Map.Entry<Text, byte[]> secret: other.secretKeysMap.entrySet()) {
Text key = secret.getKey();
if (!secretKeysMap.containsKey(key) || overwrite) {
secretKeysMap.put(key, secret.getValue());
}
}
for(Map.Entry<Text, Token<?>> token: other.tokenMap.entrySet()){
Text key = token.getKey();
if (!tokenMap.containsKey(key) || overwrite) {
tokenMap.put(key, token.getValue());
}
}
}
}
| 9573 | 28.278287 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPropertiesResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.net.InetAddress;
import java.util.Locale;
import java.util.Map;
import java.util.TreeMap;
import javax.security.sasl.Sasl;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
/**
* Provides SaslProperties to be used for a connection.
* The default implementation is to read the values from configuration.
* This class can be overridden to provide custom SaslProperties.
* The custom class can be specified via configuration.
*
*/
public class SaslPropertiesResolver implements Configurable{
private Map<String,String> properties;
Configuration conf;
/**
* Returns an instance of SaslPropertiesResolver.
* Looks up the configuration to see if there is custom class specified.
* Constructs the instance by passing the configuration directly to the
* constructor to achieve thread safety using final fields.
* @param conf
* @return SaslPropertiesResolver
*/
public static SaslPropertiesResolver getInstance(Configuration conf) {
Class<? extends SaslPropertiesResolver> clazz =
conf.getClass(
CommonConfigurationKeysPublic.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
SaslPropertiesResolver.class, SaslPropertiesResolver.class);
return ReflectionUtils.newInstance(clazz, conf);
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
properties = new TreeMap<String,String>();
String[] qop = conf.getTrimmedStrings(
CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION,
QualityOfProtection.AUTHENTICATION.toString());
for (int i=0; i < qop.length; i++) {
qop[i] = QualityOfProtection.valueOf(
StringUtils.toUpperCase(qop[i])).getSaslQop();
}
properties.put(Sasl.QOP, StringUtils.join(",", qop));
properties.put(Sasl.SERVER_AUTH, "true");
}
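  // For example: hadoop.rpc.protection=authentication,privacy is mapped to
  // the Sasl.QOP value "auth,auth-conf", listed in order of preference.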
@Override
public Configuration getConf() {
return conf;
}
/**
* The default Sasl Properties read from the configuration
* @return sasl Properties
*/
public Map<String,String> getDefaultProperties() {
return properties;
}
/**
* Identify the Sasl Properties to be used for a connection with a client.
* @param clientAddress client's address
* @return the sasl properties to be used for the connection.
*/
public Map<String, String> getServerProperties(InetAddress clientAddress){
return properties;
}
/**
* Identify the Sasl Properties to be used for a connection with a server.
* @param serverAddress server's address
* @return the sasl properties to be used for the connection.
*/
public Map<String, String> getClientProperties(InetAddress serverAddress){
return properties;
}
}
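// A minimal sketch of a custom resolver (hypothetical class), selected via the
// CommonConfigurationKeysPublic.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS key,
// that forces privacy for clients in one subnet:
//
//   public class SubnetPrivacyResolver extends SaslPropertiesResolver {
//     @Override
//     public Map<String, String> getServerProperties(InetAddress client) {
//       Map<String, String> props =
//           new TreeMap<String, String>(getDefaultProperties());
//       if (client.getHostAddress().startsWith("10.1.")) {
//         props.put(Sasl.QOP, "auth-conf");
//       }
//       return props;
//     }
//   }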
| 3760 | 34.149533 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import java.util.StringTokenizer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ExitCodeException;
/**
* A simple shell-based implementation of {@link GroupMappingServiceProvider}
* that exec's the <code>groups</code> shell command to fetch the group
* memberships of a given user.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class ShellBasedUnixGroupsMapping
implements GroupMappingServiceProvider {
private static final Log LOG =
LogFactory.getLog(ShellBasedUnixGroupsMapping.class);
/**
* Returns list of groups for a user
*
* @param user get groups for this user
* @return list of groups for a given user
*/
@Override
public List<String> getGroups(String user) throws IOException {
return getUnixGroups(user);
}
/**
* Caches groups, no need to do that for this provider
*/
@Override
public void cacheGroupsRefresh() throws IOException {
// does nothing in this provider of user to groups mapping
}
/**
* Adds groups to cache, no need to do that for this provider
*
* @param groups unused
*/
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
// does nothing in this provider of user to groups mapping
}
/**
   * Get the given user's group list from Unix by running the 'groups' command.
   * NOTE: for a non-existent user this returns an EMPTY list.
* @param user user name
* @return the groups list that the <code>user</code> belongs to. The primary
* group is returned first.
* @throws IOException if encounter any error when running the command
*/
private static List<String> getUnixGroups(final String user) throws IOException {
String result = "";
try {
result = Shell.execCommand(Shell.getGroupsForUserCommand(user));
} catch (ExitCodeException e) {
// if we didn't get the group - just return empty list;
LOG.warn("got exception trying to get groups for user " + user + ": "
+ e.getMessage());
return new LinkedList<String>();
}
StringTokenizer tokenizer =
new StringTokenizer(result, Shell.TOKEN_SEPARATOR_REGEX);
List<String> groups = new LinkedList<String>();
while (tokenizer.hasMoreTokens()) {
groups.add(tokenizer.nextToken());
}
// remove duplicated primary group
if (!Shell.WINDOWS) {
for (int i = 1; i < groups.size(); i++) {
if (groups.get(i).equals(groups.get(0))) {
groups.remove(i);
break;
}
}
}
return groups;
}
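  // For example (hypothetical user): shell output of "staff\nstaff wheel docker"
  // tokenizes to [staff, staff, wheel, docker]; the duplicated primary group at
  // index 1 is removed above, yielding [staff, wheel, docker].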
}
| 3745 | 32.446429 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.FilterInputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.sasl.RealmCallback;
import javax.security.sasl.RealmChoiceCallback;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslClient;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.GlobPattern;
import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcRequestMessageWrapper;
import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcResponseMessageWrapper;
import org.apache.hadoop.ipc.RPC.RpcKind;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RpcConstants;
import org.apache.hadoop.ipc.Server.AuthProtocol;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto.OperationProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcSaslProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcSaslProto.SaslAuth;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcSaslProto.SaslState;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
import org.apache.hadoop.util.ProtoUtil;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ByteString;
/**
* A utility class that encapsulates SASL logic for RPC client
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class SaslRpcClient {
public static final Log LOG = LogFactory.getLog(SaslRpcClient.class);
private final UserGroupInformation ugi;
private final Class<?> protocol;
private final InetSocketAddress serverAddr;
private final Configuration conf;
private SaslClient saslClient;
private SaslPropertiesResolver saslPropsResolver;
private AuthMethod authMethod;
private static final RpcRequestHeaderProto saslHeader = ProtoUtil
.makeRpcRequestHeader(RpcKind.RPC_PROTOCOL_BUFFER,
OperationProto.RPC_FINAL_PACKET, AuthProtocol.SASL.callId,
RpcConstants.INVALID_RETRY_COUNT, RpcConstants.DUMMY_CLIENT_ID);
private static final RpcSaslProto negotiateRequest =
RpcSaslProto.newBuilder().setState(SaslState.NEGOTIATE).build();
/**
* Create a SaslRpcClient that can be used by a RPC client to negotiate
* SASL authentication with a RPC server
* @param ugi - connecting user
* @param protocol - RPC protocol
* @param serverAddr - InetSocketAddress of remote server
* @param conf - Configuration
*/
public SaslRpcClient(UserGroupInformation ugi, Class<?> protocol,
InetSocketAddress serverAddr, Configuration conf) {
this.ugi = ugi;
this.protocol = protocol;
this.serverAddr = serverAddr;
this.conf = conf;
this.saslPropsResolver = SaslPropertiesResolver.getInstance(conf);
}
@VisibleForTesting
@InterfaceAudience.Private
public Object getNegotiatedProperty(String key) {
return (saslClient != null) ? saslClient.getNegotiatedProperty(key) : null;
}
// the RPC Client has an inelegant way of handling expiration of TGTs
// acquired via a keytab. any connection failure causes a relogin, so
// the Client needs to know what authMethod was being attempted if an
// exception occurs. the SASL prep for a kerberos connection should
// ideally relogin if necessary instead of exposing this detail to the
// Client
@InterfaceAudience.Private
public AuthMethod getAuthMethod() {
return authMethod;
}
/**
* Instantiate a sasl client for the first supported auth type in the
* given list. The auth type must be defined, enabled, and the user
* must possess the required credentials, else the next auth is tried.
*
* @param authTypes to attempt in the given order
* @return SaslAuth of instantiated client
* @throws AccessControlException - client doesn't support any of the auths
* @throws IOException - misc errors
*/
private SaslAuth selectSaslClient(List<SaslAuth> authTypes)
throws SaslException, AccessControlException, IOException {
SaslAuth selectedAuthType = null;
boolean switchToSimple = false;
for (SaslAuth authType : authTypes) {
if (!isValidAuthType(authType)) {
continue; // don't know what it is, try next
}
AuthMethod authMethod = AuthMethod.valueOf(authType.getMethod());
if (authMethod == AuthMethod.SIMPLE) {
switchToSimple = true;
} else {
saslClient = createSaslClient(authType);
if (saslClient == null) { // client lacks credentials, try next
continue;
}
}
selectedAuthType = authType;
break;
}
if (saslClient == null && !switchToSimple) {
List<String> serverAuthMethods = new ArrayList<String>();
for (SaslAuth authType : authTypes) {
serverAuthMethods.add(authType.getMethod());
}
throw new AccessControlException(
"Client cannot authenticate via:" + serverAuthMethods);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Use " + selectedAuthType.getMethod() +
" authentication for protocol " + protocol.getSimpleName());
}
return selectedAuthType;
}
private boolean isValidAuthType(SaslAuth authType) {
AuthMethod authMethod;
try {
authMethod = AuthMethod.valueOf(authType.getMethod());
} catch (IllegalArgumentException iae) { // unknown auth
authMethod = null;
}
// do we know what it is? is it using our mechanism?
return authMethod != null &&
authMethod.getMechanismName().equals(authType.getMechanism());
}
/**
* Try to create a SaslClient for an authentication type. May return
* null if the type isn't supported or the client lacks the required
* credentials.
*
* @param authType - the requested authentication method
* @return SaslClient for the authType or null
* @throws SaslException - error instantiating client
* @throws IOException - misc errors
*/
private SaslClient createSaslClient(SaslAuth authType)
throws SaslException, IOException {
String saslUser = null;
// SASL requires the client and server to use the same proto and serverId
// if necessary, auth types below will verify they are valid
final String saslProtocol = authType.getProtocol();
final String saslServerName = authType.getServerId();
Map<String, String> saslProperties =
saslPropsResolver.getClientProperties(serverAddr.getAddress());
CallbackHandler saslCallback = null;
final AuthMethod method = AuthMethod.valueOf(authType.getMethod());
switch (method) {
case TOKEN: {
Token<?> token = getServerToken(authType);
if (token == null) {
return null; // tokens aren't supported or user doesn't have one
}
saslCallback = new SaslClientCallbackHandler(token);
break;
}
case KERBEROS: {
if (ugi.getRealAuthenticationMethod().getAuthMethod() !=
AuthMethod.KERBEROS) {
return null; // client isn't using kerberos
}
String serverPrincipal = getServerPrincipal(authType);
if (serverPrincipal == null) {
return null; // protocol doesn't use kerberos
}
if (LOG.isDebugEnabled()) {
LOG.debug("RPC Server's Kerberos principal name for protocol="
+ protocol.getCanonicalName() + " is " + serverPrincipal);
}
break;
}
default:
throw new IOException("Unknown authentication method " + method);
}
String mechanism = method.getMechanismName();
if (LOG.isDebugEnabled()) {
LOG.debug("Creating SASL " + mechanism + "(" + method + ") "
+ " client to authenticate to service at " + saslServerName);
}
return Sasl.createSaslClient(
new String[] { mechanism }, saslUser, saslProtocol, saslServerName,
saslProperties, saslCallback);
}
/**
* Try to locate the required token for the server.
*
* @param authType of the SASL client
* @return Token<?> for server, or null if no token available
* @throws IOException - token selector cannot be instantiated
*/
private Token<?> getServerToken(SaslAuth authType) throws IOException {
TokenInfo tokenInfo = SecurityUtil.getTokenInfo(protocol, conf);
LOG.debug("Get token info proto:"+protocol+" info:"+tokenInfo);
if (tokenInfo == null) { // protocol has no support for tokens
return null;
}
TokenSelector<?> tokenSelector = null;
try {
tokenSelector = tokenInfo.value().newInstance();
} catch (InstantiationException e) {
throw new IOException(e.toString());
} catch (IllegalAccessException e) {
throw new IOException(e.toString());
}
return tokenSelector.selectToken(
SecurityUtil.buildTokenService(serverAddr), ugi.getTokens());
}
/**
* Get the remote server's principal. The value will be obtained from
* the config and cross-checked against the server's advertised principal.
*
* @param authType of the SASL client
* @return String of the server's principal
* @throws IOException - error determining configured principal
*/
@VisibleForTesting
String getServerPrincipal(SaslAuth authType) throws IOException {
KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol, conf);
LOG.debug("Get kerberos info proto:"+protocol+" info:"+krbInfo);
if (krbInfo == null) { // protocol has no support for kerberos
return null;
}
String serverKey = krbInfo.serverPrincipal();
if (serverKey == null) {
throw new IllegalArgumentException(
"Can't obtain server Kerberos config key from protocol="
+ protocol.getCanonicalName());
}
    // construct server advertised principal for comparison
String serverPrincipal = new KerberosPrincipal(
authType.getProtocol() + "/" + authType.getServerId(),
KerberosPrincipal.KRB_NT_SRV_HST).getName();
boolean isPrincipalValid = false;
// use the pattern if defined
String serverKeyPattern = conf.get(serverKey + ".pattern");
if (serverKeyPattern != null && !serverKeyPattern.isEmpty()) {
Pattern pattern = GlobPattern.compile(serverKeyPattern);
isPrincipalValid = pattern.matcher(serverPrincipal).matches();
} else {
// check that the server advertised principal matches our conf
String confPrincipal = SecurityUtil.getServerPrincipal(
conf.get(serverKey), serverAddr.getAddress());
if (LOG.isDebugEnabled()) {
LOG.debug("getting serverKey: " + serverKey + " conf value: " + conf.get(serverKey)
+ " principal: " + confPrincipal);
}
if (confPrincipal == null || confPrincipal.isEmpty()) {
throw new IllegalArgumentException(
"Failed to specify server's Kerberos principal name");
}
KerberosName name = new KerberosName(confPrincipal);
if (name.getHostName() == null) {
throw new IllegalArgumentException(
"Kerberos principal name does NOT have the expected hostname part: "
+ confPrincipal);
}
isPrincipalValid = serverPrincipal.equals(confPrincipal);
}
if (!isPrincipalValid) {
throw new IllegalArgumentException(
"Server has invalid Kerberos principal: " + serverPrincipal);
}
return serverPrincipal;
}
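  // For example (hypothetical values): authType protocol "nn" and serverId
  // "nn1.example.com" produce the advertised principal
  // "nn/nn1.example.com@<default realm>", which must either match the glob
  // configured under <serverKey>.pattern or equal the principal derived from
  // conf.get(serverKey) after _HOST substitution.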
/**
* Do client side SASL authentication with server via the given InputStream
* and OutputStream
*
* @param inS
* InputStream to use
* @param outS
* OutputStream to use
* @return AuthMethod used to negotiate the connection
* @throws IOException
*/
public AuthMethod saslConnect(InputStream inS, OutputStream outS)
throws IOException {
DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS));
DataOutputStream outStream = new DataOutputStream(new BufferedOutputStream(
outS));
// redefined if/when a SASL negotiation starts, can be queried if the
// negotiation fails
authMethod = AuthMethod.SIMPLE;
sendSaslMessage(outStream, negotiateRequest);
// loop until sasl is complete or a rpc error occurs
boolean done = false;
do {
int totalLen = inStream.readInt();
RpcResponseMessageWrapper responseWrapper =
new RpcResponseMessageWrapper();
responseWrapper.readFields(inStream);
RpcResponseHeaderProto header = responseWrapper.getMessageHeader();
switch (header.getStatus()) {
      case ERROR: // might get a RPC error during negotiation
case FATAL:
throw new RemoteException(header.getExceptionClassName(),
header.getErrorMsg());
default: break;
}
if (totalLen != responseWrapper.getLength()) {
throw new SaslException("Received malformed response length");
}
if (header.getCallId() != AuthProtocol.SASL.callId) {
throw new SaslException("Non-SASL response during negotiation");
}
RpcSaslProto saslMessage =
RpcSaslProto.parseFrom(responseWrapper.getMessageBytes());
// handle sasl negotiation process
RpcSaslProto.Builder response = null;
switch (saslMessage.getState()) {
case NEGOTIATE: {
// create a compatible SASL client, throws if no supported auths
SaslAuth saslAuthType = selectSaslClient(saslMessage.getAuthsList());
// define auth being attempted, caller can query if connect fails
authMethod = AuthMethod.valueOf(saslAuthType.getMethod());
byte[] responseToken = null;
if (authMethod == AuthMethod.SIMPLE) { // switching to SIMPLE
done = true; // not going to wait for success ack
} else {
byte[] challengeToken = null;
if (saslAuthType.hasChallenge()) {
// server provided the first challenge
challengeToken = saslAuthType.getChallenge().toByteArray();
saslAuthType =
SaslAuth.newBuilder(saslAuthType).clearChallenge().build();
} else if (saslClient.hasInitialResponse()) {
challengeToken = new byte[0];
}
responseToken = (challengeToken != null)
? saslClient.evaluateChallenge(challengeToken)
: new byte[0];
}
response = createSaslReply(SaslState.INITIATE, responseToken);
response.addAuths(saslAuthType);
break;
}
case CHALLENGE: {
if (saslClient == null) {
// should probably instantiate a client to allow a server to
// demand a specific negotiation
throw new SaslException("Server sent unsolicited challenge");
}
byte[] responseToken = saslEvaluateToken(saslMessage, false);
response = createSaslReply(SaslState.RESPONSE, responseToken);
break;
}
case SUCCESS: {
// simple server sends immediate success to a SASL client for
// switch to simple
if (saslClient == null) {
authMethod = AuthMethod.SIMPLE;
} else {
saslEvaluateToken(saslMessage, true);
}
done = true;
break;
}
default: {
throw new SaslException(
"RPC client doesn't support SASL " + saslMessage.getState());
}
}
if (response != null) {
sendSaslMessage(outStream, response.build());
}
} while (!done);
return authMethod;
}
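  // Negotiation flow, as implemented above: the client sends NEGOTIATE, the
  // server replies with its supported auths; the client INITIATEs with the
  // selected auth and an initial token, then answers CHALLENGE messages with
  // RESPONSE messages until the server reports SUCCESS (or the connection
  // falls back to SIMPLE, which skips the token exchange entirely).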
private void sendSaslMessage(DataOutputStream out, RpcSaslProto message)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Sending sasl message "+message);
}
RpcRequestMessageWrapper request =
new RpcRequestMessageWrapper(saslHeader, message);
out.writeInt(request.getLength());
request.write(out);
out.flush();
}
/**
* Evaluate the server provided challenge. The server must send a token
* if it's not done. If the server is done, the challenge token is
* optional because not all mechanisms send a final token for the client to
* update its internal state. The client must also be done after
* evaluating the optional token to ensure a malicious server doesn't
* prematurely end the negotiation with a phony success.
*
   * @param saslResponse - the sasl message received from the server
* @param serverIsDone - server negotiation state
* @throws SaslException - any problems with negotiation
*/
private byte[] saslEvaluateToken(RpcSaslProto saslResponse,
boolean serverIsDone) throws SaslException {
byte[] saslToken = null;
if (saslResponse.hasToken()) {
saslToken = saslResponse.getToken().toByteArray();
saslToken = saslClient.evaluateChallenge(saslToken);
} else if (!serverIsDone) {
// the server may only omit a token when it's done
throw new SaslException("Server challenge contains no token");
}
if (serverIsDone) {
// server tried to report success before our client completed
if (!saslClient.isComplete()) {
throw new SaslException("Client is out of sync with server");
}
// a client cannot generate a response to a success message
if (saslToken != null) {
throw new SaslException("Client generated spurious response");
}
}
return saslToken;
}
private RpcSaslProto.Builder createSaslReply(SaslState state,
byte[] responseToken) {
RpcSaslProto.Builder response = RpcSaslProto.newBuilder();
response.setState(state);
if (responseToken != null) {
response.setToken(ByteString.copyFrom(responseToken));
}
return response;
}
private boolean useWrap() {
// getNegotiatedProperty throws if client isn't complete
String qop = (String) saslClient.getNegotiatedProperty(Sasl.QOP);
// SASL wrapping is only used if the connection has a QOP, and
// the value is not auth. ex. auth-int & auth-priv
return qop != null && !"auth".equalsIgnoreCase(qop);
}
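  // For example: a negotiated QOP of "auth-int" (integrity) or "auth-conf"
  // (privacy) enables SASL wrapping of the streams below; plain "auth" does not.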
/**
* Get SASL wrapped InputStream if SASL QoP requires unwrapping,
* otherwise return original stream. Can be called only after
* saslConnect() has been called.
*
* @param in - InputStream used to make the connection
* @return InputStream that may be using SASL unwrap
* @throws IOException
*/
public InputStream getInputStream(InputStream in) throws IOException {
if (useWrap()) {
in = new WrappedInputStream(in);
}
return in;
}
/**
* Get SASL wrapped OutputStream if SASL QoP requires wrapping,
* otherwise return original stream. Can be called only after
* saslConnect() has been called.
*
   * @param out - OutputStream used to make the connection
   * @return OutputStream that may be using SASL wrap
* @throws IOException
*/
public OutputStream getOutputStream(OutputStream out) throws IOException {
if (useWrap()) {
// the client and server negotiate a maximum buffer size that can be
// wrapped
String maxBuf = (String)saslClient.getNegotiatedProperty(Sasl.RAW_SEND_SIZE);
out = new BufferedOutputStream(new WrappedOutputStream(out),
Integer.parseInt(maxBuf));
}
return out;
}
// ideally this should be folded into the RPC decoding loop but it's
// currently split across Client and SaslRpcClient...
class WrappedInputStream extends FilterInputStream {
private ByteBuffer unwrappedRpcBuffer = ByteBuffer.allocate(0);
public WrappedInputStream(InputStream in) throws IOException {
super(in);
}
@Override
public int read() throws IOException {
byte[] b = new byte[1];
int n = read(b, 0, 1);
      // mask to avoid sign-extending bytes >= 0x80 into negative ints
      return (n != -1) ? (b[0] & 0xff) : -1;
}
@Override
public int read(byte b[]) throws IOException {
return read(b, 0, b.length);
}
@Override
public synchronized int read(byte[] buf, int off, int len) throws IOException {
// fill the buffer with the next RPC message
if (unwrappedRpcBuffer.remaining() == 0) {
readNextRpcPacket();
}
// satisfy as much of the request as possible
int readLen = Math.min(len, unwrappedRpcBuffer.remaining());
unwrappedRpcBuffer.get(buf, off, readLen);
return readLen;
}
// all messages must be RPC SASL wrapped, else an exception is thrown
private void readNextRpcPacket() throws IOException {
LOG.debug("reading next wrapped RPC packet");
DataInputStream dis = new DataInputStream(in);
int rpcLen = dis.readInt();
byte[] rpcBuf = new byte[rpcLen];
dis.readFully(rpcBuf);
// decode the RPC header
ByteArrayInputStream bis = new ByteArrayInputStream(rpcBuf);
RpcResponseHeaderProto.Builder headerBuilder =
RpcResponseHeaderProto.newBuilder();
headerBuilder.mergeDelimitedFrom(bis);
boolean isWrapped = false;
// Must be SASL wrapped, verify and decode.
if (headerBuilder.getCallId() == AuthProtocol.SASL.callId) {
RpcSaslProto.Builder saslMessage = RpcSaslProto.newBuilder();
saslMessage.mergeDelimitedFrom(bis);
if (saslMessage.getState() == SaslState.WRAP) {
isWrapped = true;
byte[] token = saslMessage.getToken().toByteArray();
if (LOG.isDebugEnabled()) {
LOG.debug("unwrapping token of length:" + token.length);
}
token = saslClient.unwrap(token, 0, token.length);
unwrappedRpcBuffer = ByteBuffer.wrap(token);
}
}
if (!isWrapped) {
throw new SaslException("Server sent non-wrapped response");
}
}
}
class WrappedOutputStream extends FilterOutputStream {
public WrappedOutputStream(OutputStream out) throws IOException {
super(out);
}
@Override
public void write(byte[] buf, int off, int len) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("wrapping token of length:" + len);
}
buf = saslClient.wrap(buf, off, len);
RpcSaslProto saslMessage = RpcSaslProto.newBuilder()
.setState(SaslState.WRAP)
.setToken(ByteString.copyFrom(buf, 0, buf.length))
.build();
RpcRequestMessageWrapper request =
new RpcRequestMessageWrapper(saslHeader, saslMessage);
DataOutputStream dob = new DataOutputStream(out);
dob.writeInt(request.getLength());
request.write(dob);
}
}
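  // Wire framing shared by both wrapped streams above: a 4-byte length prefix
  // followed by an RPC message whose header carries AuthProtocol.SASL.callId
  // and whose RpcSaslProto body has state WRAP, with the SASL-wrapped payload
  // in the token field.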
/** Release resources used by wrapped saslClient */
public void dispose() throws SaslException {
if (saslClient != null) {
saslClient.dispose();
saslClient = null;
}
}
private static class SaslClientCallbackHandler implements CallbackHandler {
private final String userName;
private final char[] userPassword;
public SaslClientCallbackHandler(Token<? extends TokenIdentifier> token) {
this.userName = SaslRpcServer.encodeIdentifier(token.getIdentifier());
this.userPassword = SaslRpcServer.encodePassword(token.getPassword());
}
@Override
public void handle(Callback[] callbacks)
throws UnsupportedCallbackException {
NameCallback nc = null;
PasswordCallback pc = null;
RealmCallback rc = null;
for (Callback callback : callbacks) {
if (callback instanceof RealmChoiceCallback) {
continue;
} else if (callback instanceof NameCallback) {
nc = (NameCallback) callback;
} else if (callback instanceof PasswordCallback) {
pc = (PasswordCallback) callback;
} else if (callback instanceof RealmCallback) {
rc = (RealmCallback) callback;
} else {
throw new UnsupportedCallbackException(callback,
"Unrecognized SASL client callback");
}
}
if (nc != null) {
if (LOG.isDebugEnabled())
LOG.debug("SASL client callback: setting username: " + userName);
nc.setName(userName);
}
if (pc != null) {
if (LOG.isDebugEnabled())
LOG.debug("SASL client callback: setting userPassword");
pc.setPassword(userPassword);
}
if (rc != null) {
if (LOG.isDebugEnabled())
LOG.debug("SASL client callback: setting realm: "
+ rc.getDefaultText());
rc.setText(rc.getDefaultText());
}
}
}
}
| 26743 | 37.370158 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.TokenInfo;
@Evolving
@LimitedPrivate({"MapReduce", "HDFS"})
/**
* Interface used by RPC to get the Security information for a given
* protocol.
*/
public abstract class SecurityInfo {
/**
* Get the KerberosInfo for a given protocol.
* @param protocol interface class
* @param conf configuration
* @return KerberosInfo
*/
public abstract KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf);
/**
* Get the TokenInfo for a given protocol.
* @param protocol interface class
* @param conf configuration object.
* @return TokenInfo instance
*/
public abstract TokenInfo getTokenInfo(Class<?> protocol, Configuration conf);
}
| 1754 | 33.411765 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/GroupMappingServiceProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
/**
* An interface for the implementation of a user-to-groups mapping service
* used by {@link Groups}.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface GroupMappingServiceProvider {
public static final String GROUP_MAPPING_CONFIG_PREFIX = CommonConfigurationKeysPublic.HADOOP_SECURITY_GROUP_MAPPING;
/**
   * Get all the group memberships of a given user.
   * Returns an EMPTY list for a non-existent user.
* @param user User's name
* @return group memberships of user
* @throws IOException
*/
public List<String> getGroups(String user) throws IOException;
/**
* Refresh the cache of groups and user mapping
* @throws IOException
*/
public void cacheGroupsRefresh() throws IOException;
/**
* Caches the group user information
* @param groups list of groups to add to cache
* @throws IOException
*/
public void cacheGroupsAdd(List<String> groups) throws IOException;
}
| 2029 | 35.25 | 119 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RefreshUserMappingsProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.security.KerberosInfo;
/**
 * Protocol used to refresh the user-to-groups mappings and the superuser
 * proxy groups configuration.
*/
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface RefreshUserMappingsProtocol {
/**
* Version 1: Initial version.
*/
public static final long versionID = 1L;
/**
* Refresh user to group mappings.
* @throws IOException
*/
@Idempotent
public void refreshUserToGroupsMappings() throws IOException;
/**
* Refresh superuser proxy group list
* @throws IOException
*/
@Idempotent
public void refreshSuperUserGroupsConfiguration() throws IOException;
}
| 1811 | 30.789474 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.security;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.List;
import java.util.ServiceLoader;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.kerberos.KerberosTicket;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.util.StringUtils;
//this will need to be replaced someday when there is a suitable replacement
import sun.net.dns.ResolverConfiguration;
import sun.net.util.IPAddressUtil;
import com.google.common.annotations.VisibleForTesting;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class SecurityUtil {
public static final Log LOG = LogFactory.getLog(SecurityUtil.class);
public static final String HOSTNAME_PATTERN = "_HOST";
public static final String FAILED_TO_GET_UGI_MSG_HEADER =
"Failed to obtain user group information:";
// controls whether buildTokenService will use an ip or host/ip as given
// by the user
@VisibleForTesting
static boolean useIpForTokenService;
@VisibleForTesting
static HostResolver hostResolver;
static {
Configuration conf = new Configuration();
boolean useIp = conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP,
CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT);
setTokenServiceUseIp(useIp);
}
/**
* For use only by tests and initialization
*/
@InterfaceAudience.Private
@VisibleForTesting
public static void setTokenServiceUseIp(boolean flag) {
useIpForTokenService = flag;
hostResolver = !useIpForTokenService
? new QualifiedHostResolver()
: new StandardHostResolver();
}
/**
   * Check whether the given principal is the TGS's principal, i.e. of the
   * form "krbtgt/FOO@FOO".
   * @param principal the principal to check
   * @return true if the principal is the TGS's, false otherwise
   */
  static boolean isTGSPrincipal(KerberosPrincipal principal) {
if (principal == null)
return false;
if (principal.getName().equals("krbtgt/" + principal.getRealm() +
"@" + principal.getRealm())) {
return true;
}
return false;
}
/**
* Check whether the server principal is the TGS's principal
* @param ticket the original TGT (the ticket that is obtained when a
* kinit is done)
* @return true or false
*/
protected static boolean isOriginalTGT(KerberosTicket ticket) {
return isTGSPrincipal(ticket.getServer());
}
/**
* Convert Kerberos principal name pattern to valid Kerberos principal
* names. It replaces hostname pattern with hostname, which should be
* fully-qualified domain name. If hostname is null or "0.0.0.0", it uses
* dynamically looked-up fqdn of the current host instead.
*
* @param principalConfig
* the Kerberos principal name conf value to convert
* @param hostname
* the fully-qualified domain name used for substitution
* @return converted Kerberos principal name
* @throws IOException if the client address cannot be determined
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static String getServerPrincipal(String principalConfig,
String hostname) throws IOException {
String[] components = getComponents(principalConfig);
if (components == null || components.length != 3
|| !components[1].equals(HOSTNAME_PATTERN)) {
return principalConfig;
} else {
return replacePattern(components, hostname);
}
}
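  // For example (hypothetical principal): "hdfs/[email protected]" with
  // hostname "nn1.example.com" becomes "hdfs/[email protected]";
  // a config value without the _HOST component is returned unchanged.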
/**
* Convert Kerberos principal name pattern to valid Kerberos principal names.
* This method is similar to {@link #getServerPrincipal(String, String)},
* except 1) the reverse DNS lookup from addr to hostname is done only when
* necessary, 2) param addr can't be null (no default behavior of using local
* hostname when addr is null).
*
* @param principalConfig
* Kerberos principal name pattern to convert
* @param addr
* InetAddress of the host used for substitution
* @return converted Kerberos principal name
* @throws IOException if the client address cannot be determined
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static String getServerPrincipal(String principalConfig,
InetAddress addr) throws IOException {
String[] components = getComponents(principalConfig);
if (components == null || components.length != 3
|| !components[1].equals(HOSTNAME_PATTERN)) {
return principalConfig;
} else {
if (addr == null) {
throw new IOException("Can't replace " + HOSTNAME_PATTERN
+ " pattern since client address is null");
}
return replacePattern(components, addr.getCanonicalHostName());
}
}
private static String[] getComponents(String principalConfig) {
if (principalConfig == null)
return null;
return principalConfig.split("[/@]");
}
private static String replacePattern(String[] components, String hostname)
throws IOException {
String fqdn = hostname;
if (fqdn == null || fqdn.isEmpty() || fqdn.equals("0.0.0.0")) {
fqdn = getLocalHostName();
}
return components[0] + "/" +
StringUtils.toLowerCase(fqdn) + "@" + components[2];
}
static String getLocalHostName() throws UnknownHostException {
return InetAddress.getLocalHost().getCanonicalHostName();
}
/**
   * Login as a principal specified in config. Substitute _HOST in the
   * user's Kerberos principal name with a dynamically looked-up fully-qualified
* domain name of the current host.
*
* @param conf
* conf to use
* @param keytabFileKey
* the key to look for keytab file in conf
* @param userNameKey
* the key to look for user's Kerberos principal name in conf
* @throws IOException if login fails
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void login(final Configuration conf,
final String keytabFileKey, final String userNameKey) throws IOException {
login(conf, keytabFileKey, userNameKey, getLocalHostName());
}
/**
   * Login as a principal specified in config. Substitute _HOST in the user's
   * Kerberos principal name with the given hostname. In non-secure mode this
   * is a no-op; if no keytab is available it bails out with an exception.
*
* @param conf
* conf to use
* @param keytabFileKey
* the key to look for keytab file in conf
* @param userNameKey
* the key to look for user's Kerberos principal name in conf
* @param hostname
* hostname to use for substitution
* @throws IOException if the config doesn't specify a keytab
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void login(final Configuration conf,
final String keytabFileKey, final String userNameKey, String hostname)
throws IOException {
if(! UserGroupInformation.isSecurityEnabled())
return;
String keytabFilename = conf.get(keytabFileKey);
if (keytabFilename == null || keytabFilename.length() == 0) {
throw new IOException("Running in secure mode, but config doesn't have a keytab");
}
String principalConfig = conf.get(userNameKey, System
.getProperty("user.name"));
String principalName = SecurityUtil.getServerPrincipal(principalConfig,
hostname);
UserGroupInformation.loginUserFromKeytab(principalName, keytabFilename);
}
/**
* create the service name for a Delegation token
* @param uri of the service
* @param defPort is used if the uri lacks a port
* @return the token service, or null if no authority
* @see #buildTokenService(InetSocketAddress)
*/
public static String buildDTServiceName(URI uri, int defPort) {
String authority = uri.getAuthority();
if (authority == null) {
return null;
}
InetSocketAddress addr = NetUtils.createSocketAddr(authority, defPort);
return buildTokenService(addr).toString();
}
/**
* Get the host name from the principal name of format <service>/host@realm.
* @param principalName principal name of format as described above
   * @return host name if the string conforms to the above format, else null
*/
public static String getHostFromPrincipal(String principalName) {
return new HadoopKerberosName(principalName).getHostName();
}
private static ServiceLoader<SecurityInfo> securityInfoProviders =
ServiceLoader.load(SecurityInfo.class);
private static SecurityInfo[] testProviders = new SecurityInfo[0];
/**
* Test setup method to register additional providers.
* @param providers a list of high priority providers to use
*/
@InterfaceAudience.Private
public static void setSecurityInfoProviders(SecurityInfo... providers) {
testProviders = providers;
}
/**
* Look up the KerberosInfo for a given protocol. It searches all known
* SecurityInfo providers.
* @param protocol the protocol class to get the information for
* @param conf configuration object
* @return the KerberosInfo or null if it has no KerberosInfo defined
*/
public static KerberosInfo
getKerberosInfo(Class<?> protocol, Configuration conf) {
for(SecurityInfo provider: testProviders) {
KerberosInfo result = provider.getKerberosInfo(protocol, conf);
if (result != null) {
return result;
}
}
synchronized (securityInfoProviders) {
for(SecurityInfo provider: securityInfoProviders) {
KerberosInfo result = provider.getKerberosInfo(protocol, conf);
if (result != null) {
return result;
}
}
}
return null;
}
/**
* Look up the TokenInfo for a given protocol. It searches all known
* SecurityInfo providers.
* @param protocol The protocol class to get the information for.
* @param conf Configuration object
* @return the TokenInfo or null if it has no KerberosInfo defined
*/
public static TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
for(SecurityInfo provider: testProviders) {
TokenInfo result = provider.getTokenInfo(protocol, conf);
if (result != null) {
return result;
}
}
synchronized (securityInfoProviders) {
for(SecurityInfo provider: securityInfoProviders) {
TokenInfo result = provider.getTokenInfo(protocol, conf);
if (result != null) {
return result;
}
}
}
return null;
}
/**
* Decode the given token's service field into an InetAddress
* @param token from which to obtain the service
* @return InetAddress for the service
*/
public static InetSocketAddress getTokenServiceAddr(Token<?> token) {
return NetUtils.createSocketAddr(token.getService().toString());
}
/**
* Set the given token's service to the format expected by the RPC client
* @param token a delegation token
* @param addr the socket for the rpc connection
*/
public static void setTokenService(Token<?> token, InetSocketAddress addr) {
Text service = buildTokenService(addr);
if (token != null) {
token.setService(service);
if (LOG.isDebugEnabled()) {
LOG.debug("Acquired token "+token); // Token#toString() prints service
}
} else {
LOG.warn("Failed to get token for service "+service);
}
}
/**
* Construct the service key for a token
* @param addr InetSocketAddress of remote connection with a token
* @return "ip:port" or "host:port" depending on the value of
* hadoop.security.token.service.use_ip
*/
public static Text buildTokenService(InetSocketAddress addr) {
String host = null;
if (useIpForTokenService) {
if (addr.isUnresolved()) { // host has no ip address
throw new IllegalArgumentException(
new UnknownHostException(addr.getHostName())
);
}
host = addr.getAddress().getHostAddress();
} else {
host = StringUtils.toLowerCase(addr.getHostName());
}
return new Text(host + ":" + addr.getPort());
}
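  // For example (hypothetical address): nn1.example.com/10.0.0.5:8020 yields
  // "10.0.0.5:8020" when hadoop.security.token.service.use_ip is true, and
  // "nn1.example.com:8020" (lower-cased) when it is false.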
/**
* Construct the service key for a token
* @param uri of remote connection with a token
* @return "ip:port" or "host:port" depending on the value of
* hadoop.security.token.service.use_ip
*/
public static Text buildTokenService(URI uri) {
return buildTokenService(NetUtils.createSocketAddr(uri.getAuthority()));
}
/**
* Perform the given action as the daemon's login user. If the login
* user cannot be determined, this will log a FATAL error and exit
* the whole JVM.
*/
public static <T> T doAsLoginUserOrFatal(PrivilegedAction<T> action) {
if (UserGroupInformation.isSecurityEnabled()) {
UserGroupInformation ugi = null;
try {
ugi = UserGroupInformation.getLoginUser();
} catch (IOException e) {
LOG.fatal("Exception while getting login user", e);
e.printStackTrace();
Runtime.getRuntime().exit(-1);
}
return ugi.doAs(action);
} else {
return action.run();
}
}
/**
* Perform the given action as the daemon's login user. If an
* InterruptedException is thrown, it is converted to an IOException.
*
* @param action the action to perform
* @return the result of the action
* @throws IOException in the event of error
*/
public static <T> T doAsLoginUser(PrivilegedExceptionAction<T> action)
throws IOException {
return doAsUser(UserGroupInformation.getLoginUser(), action);
}
/**
* Perform the given action as the daemon's current user. If an
* InterruptedException is thrown, it is converted to an IOException.
*
* @param action the action to perform
* @return the result of the action
* @throws IOException in the event of error
*/
public static <T> T doAsCurrentUser(PrivilegedExceptionAction<T> action)
throws IOException {
return doAsUser(UserGroupInformation.getCurrentUser(), action);
}
private static <T> T doAsUser(UserGroupInformation ugi,
PrivilegedExceptionAction<T> action) throws IOException {
try {
return ugi.doAs(action);
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
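
  /**
   * Illustrative sketch only, not part of the original class: running a
   * privileged action as the daemon's login user. The action body is
   * hypothetical.
   */
  static String exampleDoAsLoginUser() throws IOException {
    return doAsLoginUser(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws IOException {
        return UserGroupInformation.getCurrentUser().getUserName();
      }
    });
  }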
/**
* Resolves a host subject to the security requirements determined by
* hadoop.security.token.service.use_ip.
*
* @param hostname host or ip to resolve
* @return a resolved host
* @throws UnknownHostException if the host doesn't exist
*/
@InterfaceAudience.Private
public static
InetAddress getByName(String hostname) throws UnknownHostException {
return hostResolver.getByName(hostname);
}
interface HostResolver {
InetAddress getByName(String host) throws UnknownHostException;
}
/**
* Uses standard java host resolution
*/
static class StandardHostResolver implements HostResolver {
@Override
public InetAddress getByName(String host) throws UnknownHostException {
return InetAddress.getByName(host);
}
}
/**
   * This is an alternate resolver with important properties that the standard
   * java resolver lacks:
   * 1) The hostname is fully qualified. This avoids security issues if not
   *    all hosts in the cluster share the same search domains.  It
* also prevents other hosts from performing unnecessary dns searches.
* In contrast, InetAddress simply returns the host as given.
* 2) The InetAddress is instantiated with an exact host and IP to prevent
* further unnecessary lookups. InetAddress may perform an unnecessary
* reverse lookup for an IP.
* 3) A call to getHostName() will always return the qualified hostname, or
* more importantly, the IP if instantiated with an IP. This avoids
* unnecessary dns timeouts if the host is not resolvable.
* 4) Point 3 also ensures that if the host is re-resolved, ex. during a
   *    connection re-attempt, a reverse lookup to host and forward
* lookup to IP is not performed since the reverse/forward mappings may
* not always return the same IP. If the client initiated a connection
* with an IP, then that IP is all that should ever be contacted.
*
* NOTE: this resolver is only used if:
* hadoop.security.token.service.use_ip=false
*/
protected static class QualifiedHostResolver implements HostResolver {
@SuppressWarnings("unchecked")
private List<String> searchDomains =
ResolverConfiguration.open().searchlist();
/**
* Create an InetAddress with a fully qualified hostname of the given
* hostname. InetAddress does not qualify an incomplete hostname that
* is resolved via the domain search list.
* {@link InetAddress#getCanonicalHostName()} will fully qualify the
     * hostname, but it always returns the A record whereas the given hostname
* may be a CNAME.
*
* @param host a hostname or ip address
* @return InetAddress with the fully qualified hostname or ip
* @throws UnknownHostException if host does not exist
*/
@Override
public InetAddress getByName(String host) throws UnknownHostException {
InetAddress addr = null;
if (IPAddressUtil.isIPv4LiteralAddress(host)) {
// use ipv4 address as-is
byte[] ip = IPAddressUtil.textToNumericFormatV4(host);
addr = InetAddress.getByAddress(host, ip);
} else if (IPAddressUtil.isIPv6LiteralAddress(host)) {
// use ipv6 address as-is
byte[] ip = IPAddressUtil.textToNumericFormatV6(host);
addr = InetAddress.getByAddress(host, ip);
} else if (host.endsWith(".")) {
// a rooted host ends with a dot, ex. "host."
// rooted hosts never use the search path, so only try an exact lookup
addr = getByExactName(host);
} else if (host.contains(".")) {
// the host contains a dot (domain), ex. "host.domain"
// try an exact host lookup, then fallback to search list
addr = getByExactName(host);
if (addr == null) {
addr = getByNameWithSearch(host);
}
} else {
// it's a simple host with no dots, ex. "host"
// try the search list, then fallback to exact host
InetAddress loopback = InetAddress.getByName(null);
if (host.equalsIgnoreCase(loopback.getHostName())) {
addr = InetAddress.getByAddress(host, loopback.getAddress());
} else {
addr = getByNameWithSearch(host);
if (addr == null) {
addr = getByExactName(host);
}
}
}
// unresolvable!
if (addr == null) {
throw new UnknownHostException(host);
}
return addr;
}
InetAddress getByExactName(String host) {
InetAddress addr = null;
// InetAddress will use the search list unless the host is rooted
// with a trailing dot. The trailing dot will disable any use of the
// search path in a lower level resolver. See RFC 1535.
String fqHost = host;
if (!fqHost.endsWith(".")) fqHost += ".";
try {
addr = getInetAddressByName(fqHost);
// can't leave the hostname as rooted or other parts of the system
// malfunction, ex. kerberos principals are lacking proper host
// equivalence for rooted/non-rooted hostnames
addr = InetAddress.getByAddress(host, addr.getAddress());
} catch (UnknownHostException e) {
// ignore, caller will throw if necessary
}
return addr;
}
InetAddress getByNameWithSearch(String host) {
InetAddress addr = null;
if (host.endsWith(".")) { // already qualified?
addr = getByExactName(host);
} else {
for (String domain : searchDomains) {
String dot = !domain.startsWith(".") ? "." : "";
addr = getByExactName(host + dot + domain);
if (addr != null) break;
}
}
return addr;
}
// implemented as a separate method to facilitate unit testing
InetAddress getInetAddressByName(String host) throws UnknownHostException {
return InetAddress.getByName(host);
}
void setSearchDomains(String ... domains) {
searchDomains = Arrays.asList(domains);
}
}
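
  // Illustrative sketch only: with hadoop.security.token.service.use_ip=false
  // the QualifiedHostResolver above is used, so a bare name like "nn1" is
  // qualified through the search list (e.g. to "nn1.example.com.") before the
  // InetAddress is built, while an IP literal is instantiated as-is and never
  // triggers a reverse lookup:
  //
  //   InetAddress a = SecurityUtil.getByName("nn1");      // search-list lookup
  //   InetAddress b = SecurityUtil.getByName("10.0.0.1"); // used verbatim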
public static AuthenticationMethod getAuthenticationMethod(Configuration conf) {
String value = conf.get(HADOOP_SECURITY_AUTHENTICATION, "simple");
try {
return Enum.valueOf(AuthenticationMethod.class,
StringUtils.toUpperCase(value));
} catch (IllegalArgumentException iae) {
throw new IllegalArgumentException("Invalid attribute value for " +
HADOOP_SECURITY_AUTHENTICATION + " of " + value);
}
}
public static void setAuthenticationMethod(
AuthenticationMethod authenticationMethod, Configuration conf) {
if (authenticationMethod == null) {
authenticationMethod = AuthenticationMethod.SIMPLE;
}
conf.set(HADOOP_SECURITY_AUTHENTICATION,
StringUtils.toLowerCase(authenticationMethod.toString()));
}
/*
* Check if a given port is privileged.
   * Ports with numbers smaller than 1024 are treated as privileged ports on
   * unix/linux systems. For other operating systems, use this method with
   * care. For example, Windows doesn't have the concept of privileged ports.
   * However, it may be used at a Windows client to check the port of a linux
   * server.
*
* @param port the port number
* @return true for privileged ports, false otherwise
*
*/
public static boolean isPrivilegedPort(final int port) {
return port < 1024;
}
}
| 23,270 | 35.247664 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/**
* Initializes hadoop-auth AuthenticationFilter which provides support for
* Kerberos HTTP SPNEGO authentication.
* <p/>
 * It enables anonymous access, simple/pseudo and Kerberos HTTP SPNEGO
* authentication for Hadoop JobTracker, NameNode, DataNodes and
* TaskTrackers.
* <p/>
* Refer to the <code>core-default.xml</code> file, after the comment
* 'HTTP Authentication' for details on the configuration options.
* All related configuration properties have 'hadoop.http.authentication.'
* as prefix.
*/
public class AuthenticationFilterInitializer extends FilterInitializer {
static final String PREFIX = "hadoop.http.authentication.";
/**
* Initializes hadoop-auth AuthenticationFilter.
* <p/>
* Propagates to hadoop-auth AuthenticationFilter configuration all Hadoop
* configuration properties prefixed with "hadoop.http.authentication."
*
* @param container The filter container
* @param conf Configuration for run-time parameters
*/
@Override
public void initFilter(FilterContainer container, Configuration conf) {
Map<String, String> filterConfig = getFilterConfigMap(conf, PREFIX);
container.addFilter("authentication",
AuthenticationFilter.class.getName(),
filterConfig);
}
public static Map<String, String> getFilterConfigMap(Configuration conf,
String prefix) {
Map<String, String> filterConfig = new HashMap<String, String>();
//setting the cookie path to root '/' so it is used for all resources.
filterConfig.put(AuthenticationFilter.COOKIE_PATH, "/");
for (Map.Entry<String, String> entry : conf) {
String name = entry.getKey();
if (name.startsWith(prefix)) {
String value = conf.get(name);
name = name.substring(prefix.length());
filterConfig.put(name, value);
}
}
//Resolve _HOST into bind address
String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
String principal = filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);
if (principal != null) {
try {
principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
}
catch (IOException ex) {
throw new RuntimeException("Could not resolve Kerberos principal name: " + ex.toString(), ex);
}
filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL, principal);
}
return filterConfig;
}
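
  // Illustrative sketch only (property values are hypothetical): a core-site
  // entry such as
  //   hadoop.http.authentication.type = kerberos
  // reaches the filter with the prefix stripped, i.e. as "type" = "kerberos",
  // and a principal of the form HTTP/_HOST@REALM has _HOST replaced with the
  // HTTP server's bind address by SecurityUtil.getServerPrincipal().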
}
| 3,696 | 36.72449 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KerberosInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.lang.annotation.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Indicates Kerberos related information to be used
*/
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public @interface KerberosInfo {
/** Key for getting server's Kerberos principal name from Configuration */
String serverPrincipal();
String clientPrincipal() default "";
}
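
// Illustrative usage sketch only (protocol and key name are hypothetical):
//
//   @KerberosInfo(serverPrincipal = "dfs.namenode.kerberos.principal")
//   public interface ExampleProtocol { ... }
//
// SecurityUtil.getKerberosInfo(ExampleProtocol.class, conf) then surfaces
// this annotation through the registered SecurityInfo providers.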
| 1,385 | 35.473684 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/NetgroupCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Class that caches the netgroups and inverts group-to-user map
* to user-to-group map, primarily intended for use with
 * netgroups (as returned by getent netgroup) which only returns
* group to user mapping.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class NetgroupCache {
private static ConcurrentHashMap<String, Set<String>> userToNetgroupsMap =
new ConcurrentHashMap<String, Set<String>>();
/**
* Get netgroups for a given user
*
* @param user get groups for this user
* @param groups put groups into this List
*/
public static void getNetgroups(final String user,
List<String> groups) {
Set<String> userGroups = userToNetgroupsMap.get(user);
//ConcurrentHashMap does not allow null values;
//So null value check can be used to check if the key exists
if (userGroups != null) {
groups.addAll(userGroups);
}
}
/**
* Get the list of cached netgroups
*
* @return list of cached groups
*/
public static List<String> getNetgroupNames() {
return new LinkedList<String>(getGroups());
}
private static Set<String> getGroups() {
Set<String> allGroups = new HashSet<String> ();
for (Set<String> userGroups : userToNetgroupsMap.values()) {
allGroups.addAll(userGroups);
}
return allGroups;
}
/**
* Returns true if a given netgroup is cached
*
* @param group check if this group is cached
* @return true if group is cached, false otherwise
*/
public static boolean isCached(String group) {
return getGroups().contains(group);
}
/**
* Clear the cache
*/
public static void clear() {
userToNetgroupsMap.clear();
}
/**
* Add group to cache
*
* @param group name of the group to add to cache
* @param users list of users for a given group
*/
public static void add(String group, List<String> users) {
for (String user : users) {
Set<String> userGroups = userToNetgroupsMap.get(user);
// ConcurrentHashMap does not allow null values;
// So null value check can be used to check if the key exists
if (userGroups == null) {
//Generate a ConcurrentHashSet (backed by the keyset of the ConcurrentHashMap)
userGroups =
Collections.newSetFromMap(new ConcurrentHashMap<String,Boolean>());
Set<String> currentSet = userToNetgroupsMap.putIfAbsent(user, userGroups);
if (currentSet != null) {
userGroups = currentSet;
}
}
userGroups.add(group);
}
}
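
  /**
   * Illustrative sketch only (group and user names are hypothetical):
   * caching one netgroup and reading the inverted mapping back.
   */
  static List<String> exampleUsage() {
    add("@hadoopdevs", java.util.Arrays.asList("alice", "bob"));
    List<String> groups = new LinkedList<String>();
    getNetgroups("alice", groups); // groups now contains "@hadoopdevs"
    return groups;
  }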
}
| 3,716 | 31.043103 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.InputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A SaslInputStream is composed of an InputStream and a SaslServer (or
* SaslClient) so that read() methods return data that are read in from the
* underlying InputStream but have been additionally processed by the SaslServer
* (or SaslClient) object. The SaslServer (or SaslClient) object must be fully
* initialized before being used by a SaslInputStream.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class SaslInputStream extends InputStream implements ReadableByteChannel {
public static final Log LOG = LogFactory.getLog(SaslInputStream.class);
private final DataInputStream inStream;
/** Should we wrap the communication channel? */
private final boolean useWrap;
/*
* data read from the underlying input stream before being processed by SASL
*/
private byte[] saslToken;
private final SaslClient saslClient;
private final SaslServer saslServer;
private byte[] lengthBuf = new byte[4];
/*
* buffer holding data that have been processed by SASL, but have not been
* read out
*/
private byte[] obuffer;
// position of the next "new" byte
private int ostart = 0;
// position of the last "new" byte
private int ofinish = 0;
// whether or not this stream is open
private boolean isOpen = true;
private static int unsignedBytesToInt(byte[] buf) {
if (buf.length != 4) {
throw new IllegalArgumentException(
"Cannot handle byte array other than 4 bytes");
}
int result = 0;
for (int i = 0; i < 4; i++) {
result <<= 8;
result |= ((int) buf[i] & 0xff);
}
return result;
}
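
  // Illustrative note: the prefix is a 4-byte big-endian length, e.g.
  // {0x00, 0x00, 0x01, 0x00} decodes to 256, so readMoreData() would then
  // read a 256-byte SASL token from the underlying stream.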
/**
* Read more data and get them processed <br>
* Entry condition: ostart = ofinish <br>
* Exit condition: ostart <= ofinish <br>
*
* return (ofinish-ostart) (we have this many bytes for you), 0 (no data now,
* but could have more later), or -1 (absolutely no more data)
*/
private int readMoreData() throws IOException {
try {
inStream.readFully(lengthBuf);
int length = unsignedBytesToInt(lengthBuf);
if (LOG.isDebugEnabled())
LOG.debug("Actual length is " + length);
saslToken = new byte[length];
inStream.readFully(saslToken);
} catch (EOFException e) {
return -1;
}
try {
if (saslServer != null) { // using saslServer
obuffer = saslServer.unwrap(saslToken, 0, saslToken.length);
} else { // using saslClient
obuffer = saslClient.unwrap(saslToken, 0, saslToken.length);
}
} catch (SaslException se) {
try {
disposeSasl();
} catch (SaslException ignored) {
}
throw se;
}
ostart = 0;
if (obuffer == null)
ofinish = 0;
else
ofinish = obuffer.length;
return ofinish;
}
/**
* Disposes of any system resources or security-sensitive information Sasl
* might be using.
*
* @exception SaslException
* if a SASL error occurs.
*/
private void disposeSasl() throws SaslException {
if (saslClient != null) {
saslClient.dispose();
}
if (saslServer != null) {
saslServer.dispose();
}
}
/**
* Constructs a SASLInputStream from an InputStream and a SaslServer <br>
* Note: if the specified InputStream or SaslServer is null, a
* NullPointerException may be thrown later when they are used.
*
* @param inStream
* the InputStream to be processed
* @param saslServer
* an initialized SaslServer object
*/
public SaslInputStream(InputStream inStream, SaslServer saslServer) {
this.inStream = new DataInputStream(inStream);
this.saslServer = saslServer;
this.saslClient = null;
String qop = (String) saslServer.getNegotiatedProperty(Sasl.QOP);
this.useWrap = qop != null && !"auth".equalsIgnoreCase(qop);
}
/**
* Constructs a SASLInputStream from an InputStream and a SaslClient <br>
* Note: if the specified InputStream or SaslClient is null, a
* NullPointerException may be thrown later when they are used.
*
* @param inStream
* the InputStream to be processed
* @param saslClient
* an initialized SaslClient object
*/
public SaslInputStream(InputStream inStream, SaslClient saslClient) {
this.inStream = new DataInputStream(inStream);
this.saslServer = null;
this.saslClient = saslClient;
String qop = (String) saslClient.getNegotiatedProperty(Sasl.QOP);
this.useWrap = qop != null && !"auth".equalsIgnoreCase(qop);
}
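
  // Illustrative note: the negotiated QOP decides whether reads are wrapped.
  // With qop="auth" the stream is a pass-through; with "auth-int" or
  // "auth-conf" (useWrap == true) every read drains length-prefixed tokens
  // through saslServer/saslClient.unwrap().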
/**
* Reads the next byte of data from this input stream. The value byte is
* returned as an <code>int</code> in the range <code>0</code> to
* <code>255</code>. If no byte is available because the end of the stream has
* been reached, the value <code>-1</code> is returned. This method blocks
* until input data is available, the end of the stream is detected, or an
* exception is thrown.
* <p>
*
* @return the next byte of data, or <code>-1</code> if the end of the stream
* is reached.
* @exception IOException
* if an I/O error occurs.
*/
@Override
public int read() throws IOException {
if (!useWrap) {
return inStream.read();
}
if (ostart >= ofinish) {
// we loop for new data as we are blocking
int i = 0;
while (i == 0)
i = readMoreData();
if (i == -1)
return -1;
}
return ((int) obuffer[ostart++] & 0xff);
}
/**
* Reads up to <code>b.length</code> bytes of data from this input stream into
* an array of bytes.
* <p>
* The <code>read</code> method of <code>InputStream</code> calls the
* <code>read</code> method of three arguments with the arguments
* <code>b</code>, <code>0</code>, and <code>b.length</code>.
*
* @param b
* the buffer into which the data is read.
* @return the total number of bytes read into the buffer, or <code>-1</code>
   *         if there is no more data because the end of the stream has been
* reached.
* @exception IOException
* if an I/O error occurs.
*/
@Override
public int read(byte[] b) throws IOException {
return read(b, 0, b.length);
}
/**
* Reads up to <code>len</code> bytes of data from this input stream into an
* array of bytes. This method blocks until some input is available. If the
   * first argument is <code>null</code>, up to <code>len</code> bytes are read
* and discarded.
*
* @param b
* the buffer into which the data is read.
* @param off
* the start offset of the data.
* @param len
* the maximum number of bytes read.
* @return the total number of bytes read into the buffer, or <code>-1</code>
* if there is no more data because the end of the stream has been
* reached.
* @exception IOException
* if an I/O error occurs.
*/
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (!useWrap) {
return inStream.read(b, off, len);
}
if (ostart >= ofinish) {
// we loop for new data as we are blocking
int i = 0;
while (i == 0)
i = readMoreData();
if (i == -1)
return -1;
}
if (len <= 0) {
return 0;
}
int available = ofinish - ostart;
if (len < available)
available = len;
if (b != null) {
System.arraycopy(obuffer, ostart, b, off, available);
}
ostart = ostart + available;
return available;
}
/**
* Skips <code>n</code> bytes of input from the bytes that can be read from
* this input stream without blocking.
*
* <p>
* Fewer bytes than requested might be skipped. The actual number of bytes
* skipped is equal to <code>n</code> or the result of a call to
* {@link #available() <code>available</code>}, whichever is smaller. If
* <code>n</code> is less than zero, no bytes are skipped.
*
* <p>
* The actual number of bytes skipped is returned.
*
* @param n
* the number of bytes to be skipped.
* @return the actual number of bytes skipped.
* @exception IOException
* if an I/O error occurs.
*/
@Override
public long skip(long n) throws IOException {
if (!useWrap) {
return inStream.skip(n);
}
int available = ofinish - ostart;
if (n > available) {
n = available;
}
if (n < 0) {
return 0;
}
ostart += n;
return n;
}
/**
   * Returns the number of bytes that can be read from this input stream
   * without blocking. When wrapping is enabled, this is the number of
   * unwrapped bytes that have been buffered but not yet read out.
*
* @return the number of bytes that can be read from this input stream without
* blocking.
* @exception IOException
* if an I/O error occurs.
*/
@Override
public int available() throws IOException {
if (!useWrap) {
return inStream.available();
}
return (ofinish - ostart);
}
/**
* Closes this input stream and releases any system resources associated with
* the stream.
* <p>
* The <code>close</code> method of <code>SASLInputStream</code> calls the
* <code>close</code> method of its underlying input stream.
*
* @exception IOException
* if an I/O error occurs.
*/
@Override
public void close() throws IOException {
disposeSasl();
ostart = 0;
ofinish = 0;
inStream.close();
isOpen = false;
}
/**
* Tests if this input stream supports the <code>mark</code> and
* <code>reset</code> methods, which it does not.
*
* @return <code>false</code>, since this class does not support the
* <code>mark</code> and <code>reset</code> methods.
*/
@Override
public boolean markSupported() {
return false;
}
@Override
public boolean isOpen() {
return isOpen;
}
@Override
public int read(ByteBuffer dst) throws IOException {
int bytesRead = 0;
if (dst.hasArray()) {
bytesRead = read(dst.array(), dst.arrayOffset() + dst.position(),
dst.remaining());
if (bytesRead > -1) {
dst.position(dst.position() + bytesRead);
}
} else {
byte[] buf = new byte[dst.remaining()];
bytesRead = read(buf);
if (bytesRead > -1) {
dst.put(buf, 0, bytesRead);
}
}
return bytesRead;
}
}
| 11,922 | 30.293963 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/WhitelistBasedResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Map;
import java.util.TreeMap;
import javax.security.sasl.Sasl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SaslPropertiesResolver;
import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
import org.apache.hadoop.util.CombinedIPWhiteList;
import org.apache.hadoop.util.StringUtils;
/**
* An implementation of the SaslPropertiesResolver.
* Uses a white list of IPs.
 * If the connection's IP address is in the list of IP addresses, the saslProperties
* will be unchanged.
* If the connection's IP is not in the list of IP addresses, then QOP for the
* connection will be restricted to "hadoop.rpc.protection.non-whitelist"
*
* Uses 3 IPList implementations together to form an aggregate whitelist.
* 1. ConstantIPList - to check against a set of hardcoded IPs
* 2. Fixed IP List - to check against a list of IP addresses which are specified externally, but
 * will not change during runtime.
* 3. Variable IP List - to check against a list of IP addresses which are specified externally and
* could change during runtime.
 * A connection IP address will be checked against these 3 IP lists in the order specified above.
 * Once a match is found, the IP address is determined to be in the whitelist.
*
 * The behavior can be configured using several configuration parameters.
*
*/
public class WhitelistBasedResolver extends SaslPropertiesResolver {
public static final Log LOG = LogFactory.getLog(WhitelistBasedResolver.class);
private static final String FIXEDWHITELIST_DEFAULT_LOCATION = "/etc/hadoop/fixedwhitelist";
private static final String VARIABLEWHITELIST_DEFAULT_LOCATION = "/etc/hadoop/whitelist";
/**
   * Path to the file containing the subnets and IP addresses that form the fixed whitelist.
*/
public static final String HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE =
"hadoop.security.sasl.fixedwhitelist.file";
/**
* Enables/Disables variable whitelist
*/
public static final String HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE =
"hadoop.security.sasl.variablewhitelist.enable";
/**
   * Path to the file containing the subnets and IP addresses that form the variable whitelist.
*/
public static final String HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE =
"hadoop.security.sasl.variablewhitelist.file";
/**
   * Interval in seconds at which the variable whitelist file is checked for updates.
*/
public static final String HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS =
"hadoop.security.sasl.variablewhitelist.cache.secs";
/**
* comma separated list containing alternate hadoop.rpc.protection values for
* clients which are not in whitelist
*/
public static final String HADOOP_RPC_PROTECTION_NON_WHITELIST =
"hadoop.rpc.protection.non-whitelist";
private CombinedIPWhiteList whiteList;
private Map<String, String> saslProps;
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
String fixedFile = conf.get(HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE,
FIXEDWHITELIST_DEFAULT_LOCATION);
String variableFile = null;
long expiryTime = 0;
if (conf.getBoolean(HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE, false)) {
variableFile = conf.get(HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE,
VARIABLEWHITELIST_DEFAULT_LOCATION);
expiryTime =
conf.getLong(HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS,3600) * 1000;
}
whiteList = new CombinedIPWhiteList(fixedFile,variableFile,expiryTime);
this.saslProps = getSaslProperties(conf);
}
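
  // Illustrative configuration sketch only (values are hypothetical):
  //
  //   hadoop.security.sasl.fixedwhitelist.file          = /etc/hadoop/fixedwhitelist
  //   hadoop.security.sasl.variablewhitelist.enable     = true
  //   hadoop.security.sasl.variablewhitelist.file       = /etc/hadoop/whitelist
  //   hadoop.security.sasl.variablewhitelist.cache.secs = 3600
  //   hadoop.rpc.protection.non-whitelist               = privacy
  //
  // Whitelisted client IPs then negotiate with the default SASL properties,
  // while all other clients are restricted to "privacy" (auth-conf).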
/**
* Identify the Sasl Properties to be used for a connection with a client.
* @param clientAddress client's address
* @return the sasl properties to be used for the connection.
*/
@Override
public Map<String, String> getServerProperties(InetAddress clientAddress) {
if (clientAddress == null) {
return saslProps;
}
return whiteList.isIn(clientAddress.getHostAddress())?getDefaultProperties():saslProps;
}
public Map<String, String> getServerProperties(String clientAddress) throws UnknownHostException {
if (clientAddress == null) {
return saslProps;
}
return getServerProperties(InetAddress.getByName(clientAddress));
}
static Map<String, String> getSaslProperties(Configuration conf) {
Map<String, String> saslProps =new TreeMap<String, String>();
String[] qop = conf.getStrings(HADOOP_RPC_PROTECTION_NON_WHITELIST,
QualityOfProtection.PRIVACY.toString());
for (int i=0; i < qop.length; i++) {
qop[i] = QualityOfProtection.valueOf(
StringUtils.toUpperCase(qop[i])).getSaslQop();
}
saslProps.put(Sasl.QOP, StringUtils.join(",", qop));
saslProps.put(Sasl.SERVER_AUTH, "true");
return saslProps;
}
}
| 5,798 | 37.403974 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/IdMappingConstant.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
/**
* Some constants for IdMapping
*/
public class IdMappingConstant {
/** Do user/group update every 15 minutes by default, minimum 1 minute */
public final static String USERGROUPID_UPDATE_MILLIS_KEY = "usergroupid.update.millis";
public final static long USERGROUPID_UPDATE_MILLIS_DEFAULT = 15 * 60 * 1000; // ms
public final static long USERGROUPID_UPDATE_MILLIS_MIN = 1 * 60 * 1000; // ms
public final static String UNKNOWN_USER = "nobody";
public final static String UNKNOWN_GROUP = "nobody";
// Used for finding the configured static mapping file.
public static final String STATIC_ID_MAPPING_FILE_KEY = "static.id.mapping.file";
public static final String STATIC_ID_MAPPING_FILE_DEFAULT = "/etc/nfs.map";
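
  // Illustrative sketch of the static mapping file format as consumed by the
  // id-mapping implementation (the concrete lines below are hypothetical):
  //
  //   uid 10 100   # map remote UID 10 to local UID 100
  //   gid 11 101   # map remote GID 11 to local GID 101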
}
| 1,584 | 41.837838 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AccessControlException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* An exception class for access control related issues.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class AccessControlException
extends org.apache.hadoop.fs.permission.AccessControlException {
//Required by {@link java.io.Serializable}.
private static final long serialVersionUID = 1L;
/**
* Default constructor is needed for unwrapping from
* {@link org.apache.hadoop.ipc.RemoteException}.
*/
public AccessControlException() {
super("Permission denied.");
}
/**
* Constructs an {@link AccessControlException}
* with the specified detail message.
* @param s the detail message.
*/
public AccessControlException(String s) {super(s);}
/**
* Constructs a new exception with the specified cause and a detail
* message of <tt>(cause==null ? null : cause.toString())</tt> (which
* typically contains the class and detail message of <tt>cause</tt>).
* @param cause the cause (which is saved for later retrieval by the
* {@link #getCause()} method). (A <tt>null</tt> value is
* permitted, and indicates that the cause is nonexistent or
* unknown.)
*/
public AccessControlException(Throwable cause) {
super(cause);
}
}
| 2,244 | 35.209677 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ProviderUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
import org.apache.hadoop.security.alias.LocalJavaKeyStoreProvider;
public class ProviderUtils {
/**
* Convert a nested URI to decode the underlying path. The translation takes
* the authority and parses it into the underlying scheme and authority.
* For example, "myscheme://hdfs@nn/my/path" is converted to
* "hdfs://nn/my/path".
* @param nestedUri the URI from the nested URI
* @return the unnested path
*/
public static Path unnestUri(URI nestedUri) {
String[] parts = nestedUri.getAuthority().split("@", 2);
StringBuilder result = new StringBuilder(parts[0]);
result.append("://");
if (parts.length == 2) {
result.append(parts[1]);
}
result.append(nestedUri.getPath());
if (nestedUri.getQuery() != null) {
result.append("?");
result.append(nestedUri.getQuery());
}
if (nestedUri.getFragment() != null) {
result.append("#");
result.append(nestedUri.getFragment());
}
return new Path(result.toString());
}
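
  // Illustrative sketch only (paths are hypothetical): a nested provider URI
  // such as "jceks://hdfs@nn/my/path.jceks" unnests to
  // "hdfs://nn/my/path.jceks", while "jceks://file/tmp/creds.jceks" (no
  // user in the authority) unnests to "file:///tmp/creds.jceks".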
/**
* Mangle given local java keystore file URI to allow use as a
* LocalJavaKeyStoreProvider.
* @param localFile absolute URI with file scheme and no authority component.
* i.e. return of File.toURI,
* e.g. file:///home/larry/creds.jceks
* @return URI of the form localjceks://file/home/larry/creds.jceks
   * @throws IllegalArgumentException if localFile isn't a file uri or if it
* has an authority component.
* @throws URISyntaxException if the wrapping process violates RFC 2396
*/
public static URI nestURIForLocalJavaKeyStoreProvider(final URI localFile)
throws URISyntaxException {
if (!("file".equals(localFile.getScheme()))) {
throw new IllegalArgumentException("passed URI had a scheme other than " +
"file.");
}
if (localFile.getAuthority() != null) {
throw new IllegalArgumentException("passed URI must not have an " +
"authority component. For non-local keystores, please use " +
JavaKeyStoreProvider.class.getName());
}
return new URI(LocalJavaKeyStoreProvider.SCHEME_NAME,
"//file" + localFile.getSchemeSpecificPart(), localFile.getFragment());
}
}
| 3,256 | 38.240964 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.PerformanceAdvisory;
public class JniBasedUnixGroupsMappingWithFallback implements
GroupMappingServiceProvider {
private static final Log LOG = LogFactory
.getLog(JniBasedUnixGroupsMappingWithFallback.class);
private GroupMappingServiceProvider impl;
public JniBasedUnixGroupsMappingWithFallback() {
if (NativeCodeLoader.isNativeCodeLoaded()) {
this.impl = new JniBasedUnixGroupsMapping();
} else {
PerformanceAdvisory.LOG.debug("Falling back to shell based");
this.impl = new ShellBasedUnixGroupsMapping();
}
if (LOG.isDebugEnabled()){
LOG.debug("Group mapping impl=" + impl.getClass().getName());
}
}
@Override
public List<String> getGroups(String user) throws IOException {
return impl.getGroups(user);
}
@Override
public void cacheGroupsRefresh() throws IOException {
impl.cacheGroupsRefresh();
}
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
impl.cacheGroupsAdd(groups);
}
}
| 2,075 | 30.938462 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.BufferedOutputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A SaslOutputStream is composed of an OutputStream and a SaslServer (or
* SaslClient) so that write() methods first process the data before writing
* them out to the underlying OutputStream. The SaslServer (or SaslClient)
* object must be fully initialized before being used by a SaslOutputStream.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class SaslOutputStream extends OutputStream {
private final OutputStream outStream;
// processed data ready to be written out
private byte[] saslToken;
private final SaslClient saslClient;
private final SaslServer saslServer;
// buffer holding one byte of incoming data
private final byte[] ibuffer = new byte[1];
private final boolean useWrap;
/**
* Constructs a SASLOutputStream from an OutputStream and a SaslServer <br>
* Note: if the specified OutputStream or SaslServer is null, a
* NullPointerException may be thrown later when they are used.
*
* @param outStream
* the OutputStream to be processed
* @param saslServer
* an initialized SaslServer object
*/
public SaslOutputStream(OutputStream outStream, SaslServer saslServer) {
this.saslServer = saslServer;
this.saslClient = null;
String qop = (String) saslServer.getNegotiatedProperty(Sasl.QOP);
this.useWrap = qop != null && !"auth".equalsIgnoreCase(qop);
if (useWrap) {
this.outStream = new BufferedOutputStream(outStream, 64*1024);
} else {
this.outStream = outStream;
}
}
/**
* Constructs a SASLOutputStream from an OutputStream and a SaslClient <br>
* Note: if the specified OutputStream or SaslClient is null, a
* NullPointerException may be thrown later when they are used.
*
* @param outStream
* the OutputStream to be processed
* @param saslClient
* an initialized SaslClient object
*/
public SaslOutputStream(OutputStream outStream, SaslClient saslClient) {
this.saslServer = null;
this.saslClient = saslClient;
String qop = (String) saslClient.getNegotiatedProperty(Sasl.QOP);
this.useWrap = qop != null && !"auth".equalsIgnoreCase(qop);
if (useWrap) {
this.outStream = new BufferedOutputStream(outStream, 64*1024);
} else {
this.outStream = outStream;
}
}
/**
* Disposes of any system resources or security-sensitive information Sasl
* might be using.
*
* @exception SaslException
* if a SASL error occurs.
*/
private void disposeSasl() throws SaslException {
if (saslClient != null) {
saslClient.dispose();
}
if (saslServer != null) {
saslServer.dispose();
}
}
/**
* Writes the specified byte to this output stream.
*
* @param b
* the <code>byte</code>.
* @exception IOException
* if an I/O error occurs.
*/
@Override
public void write(int b) throws IOException {
if (!useWrap) {
outStream.write(b);
return;
}
ibuffer[0] = (byte) b;
write(ibuffer, 0, 1);
}
/**
* Writes <code>b.length</code> bytes from the specified byte array to this
* output stream.
* <p>
* The <code>write</code> method of <code>SASLOutputStream</code> calls the
* <code>write</code> method of three arguments with the three arguments
* <code>b</code>, <code>0</code>, and <code>b.length</code>.
*
* @param b
* the data.
* @exception NullPointerException
* if <code>b</code> is null.
* @exception IOException
* if an I/O error occurs.
*/
@Override
public void write(byte[] b) throws IOException {
write(b, 0, b.length);
}
/**
* Writes <code>len</code> bytes from the specified byte array starting at
* offset <code>off</code> to this output stream.
*
* @param inBuf
* the data.
* @param off
* the start offset in the data.
* @param len
* the number of bytes to write.
* @exception IOException
* if an I/O error occurs.
*/
@Override
public void write(byte[] inBuf, int off, int len) throws IOException {
if (!useWrap) {
outStream.write(inBuf, off, len);
return;
}
try {
if (saslServer != null) { // using saslServer
saslToken = saslServer.wrap(inBuf, off, len);
} else { // using saslClient
saslToken = saslClient.wrap(inBuf, off, len);
}
} catch (SaslException se) {
try {
disposeSasl();
} catch (SaslException ignored) {
}
throw se;
}
if (saslToken != null) {
ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
DataOutputStream dout = new DataOutputStream(byteOut);
dout.writeInt(saslToken.length);
outStream.write(byteOut.toByteArray());
outStream.write(saslToken, 0, saslToken.length);
saslToken = null;
}
}
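
  // Illustrative note: each wrapped write emits one length-prefixed frame,
  //   [4-byte big-endian token length][SASL-wrapped token bytes]
  // which is exactly the framing SaslInputStream.readMoreData() consumes.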
/**
* Flushes this output stream
*
* @exception IOException
* if an I/O error occurs.
*/
@Override
public void flush() throws IOException {
outStream.flush();
}
/**
* Closes this output stream and releases any system resources associated with
* this stream.
*
* @exception IOException
* if an I/O error occurs.
*/
@Override
public void close() throws IOException {
disposeSasl();
outStream.close();
}
}
| 6,719 | 29.825688 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
/**
* An implementation of {@link GroupMappingServiceProvider} which
* composites other group mapping providers for determining group membership.
 * This allows existing provider implementations to be combined into a new
 * composite provider without custom development, in order to deal with complex situations.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class CompositeGroupsMapping
implements GroupMappingServiceProvider, Configurable {
public static final String MAPPING_PROVIDERS_CONFIG_KEY = GROUP_MAPPING_CONFIG_PREFIX + ".providers";
public static final String MAPPING_PROVIDERS_COMBINED_CONFIG_KEY = MAPPING_PROVIDERS_CONFIG_KEY + ".combined";
public static final String MAPPING_PROVIDER_CONFIG_PREFIX = GROUP_MAPPING_CONFIG_PREFIX + ".provider";
private static final Log LOG = LogFactory.getLog(CompositeGroupsMapping.class);
private List<GroupMappingServiceProvider> providersList =
new ArrayList<GroupMappingServiceProvider>();
private Configuration conf;
private boolean combined;
/**
* Returns list of groups for a user.
*
* @param user get groups for this user
* @return list of groups for a given user
*/
@Override
public synchronized List<String> getGroups(String user) throws IOException {
Set<String> groupSet = new TreeSet<String>();
List<String> groups = null;
for (GroupMappingServiceProvider provider : providersList) {
try {
groups = provider.getGroups(user);
} catch (Exception e) {
//LOG.warn("Exception trying to get groups for user " + user, e);
}
if (groups != null && ! groups.isEmpty()) {
groupSet.addAll(groups);
if (!combined) break;
}
}
List<String> results = new ArrayList<String>(groupSet.size());
results.addAll(groupSet);
return results;
}
/**
* Caches groups, no need to do that for this provider
*/
@Override
public void cacheGroupsRefresh() throws IOException {
// does nothing in this provider of user to groups mapping
}
/**
* Adds groups to cache, no need to do that for this provider
*
* @param groups unused
*/
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
// does nothing in this provider of user to groups mapping
}
@Override
public synchronized Configuration getConf() {
return conf;
}
@Override
public synchronized void setConf(Configuration conf) {
this.conf = conf;
this.combined = conf.getBoolean(MAPPING_PROVIDERS_COMBINED_CONFIG_KEY, true);
loadMappingProviders();
}
private void loadMappingProviders() {
String[] providerNames = conf.getStrings(MAPPING_PROVIDERS_CONFIG_KEY, new String[]{});
String providerKey;
for (String name : providerNames) {
providerKey = MAPPING_PROVIDER_CONFIG_PREFIX + "." + name;
Class<?> providerClass = conf.getClass(providerKey, null);
if (providerClass == null) {
LOG.error("The mapping provider, " + name + " does not have a valid class");
} else {
addMappingProvider(name, providerClass);
}
}
}
private void addMappingProvider(String providerName, Class<?> providerClass) {
Configuration newConf = prepareConf(providerName);
GroupMappingServiceProvider provider =
(GroupMappingServiceProvider) ReflectionUtils.newInstance(providerClass, newConf);
providersList.add(provider);
}
/*
* For any provider specific configuration properties, such as "hadoop.security.group.mapping.ldap.url"
* and the like, allow them to be configured as "hadoop.security.group.mapping.provider.PROVIDER-X.ldap.url",
* so that a provider such as LdapGroupsMapping can be used to composite a complex one with other providers.
*/
private Configuration prepareConf(String providerName) {
Configuration newConf = new Configuration();
Iterator<Map.Entry<String, String>> entries = conf.iterator();
String providerKey = MAPPING_PROVIDER_CONFIG_PREFIX + "." + providerName;
while (entries.hasNext()) {
Map.Entry<String, String> entry = entries.next();
String key = entry.getKey();
// get a property like "hadoop.security.group.mapping.provider.PROVIDER-X.ldap.url"
if (key.startsWith(providerKey) && !key.equals(providerKey)) {
// restore to be the one like "hadoop.security.group.mapping.ldap.url"
// so that can be used by original provider.
key = key.replace(".provider." + providerName, "");
newConf.set(key, entry.getValue());
}
}
return newConf;
}
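
  // Illustrative configuration sketch only (provider names and values are
  // hypothetical):
  //
  //   hadoop.security.group.mapping.providers           = shell4services,ad4users
  //   hadoop.security.group.mapping.providers.combined  = true
  //   hadoop.security.group.mapping.provider.shell4services =
  //       org.apache.hadoop.security.ShellBasedUnixGroupsMapping
  //   hadoop.security.group.mapping.provider.ad4users =
  //       org.apache.hadoop.security.LdapGroupsMapping
  //   hadoop.security.group.mapping.provider.ad4users.ldap.url =
  //       ldap://ad-server:389
  //
  // prepareConf() rewrites the provider-scoped key back to
  // "hadoop.security.group.mapping.ldap.url" so LdapGroupsMapping can use it.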
}
| 6,028 | 35.101796 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
/**
* A JNI-based implementation of {@link GroupMappingServiceProvider}
* that invokes libC calls to get the group
* memberships of a given user.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class JniBasedUnixGroupsMapping implements GroupMappingServiceProvider {
private static final Log LOG =
LogFactory.getLog(JniBasedUnixGroupsMapping.class);
static {
if (!NativeCodeLoader.isNativeCodeLoaded()) {
throw new RuntimeException("Bailing out since native library couldn't " +
"be loaded");
}
anchorNative();
LOG.debug("Using JniBasedUnixGroupsMapping for Group resolution");
}
/**
* Set up our JNI resources.
*
* @throws RuntimeException if setup fails.
*/
native static void anchorNative();
/**
* Get the set of groups associated with a user.
*
* @param username The user name
*
* @return The set of groups associated with a user.
*/
native static String[] getGroupsForUser(String username);
/**
* Log an error message about a group. Used from JNI.
*/
static private void logError(int groupId, String error) {
LOG.error("error looking up the name of group " + groupId + ": " + error);
}
@Override
public List<String> getGroups(String user) throws IOException {
String[] groups = new String[0];
try {
groups = getGroupsForUser(user);
} catch (Exception e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Error getting groups for " + user, e);
} else {
LOG.info("Error getting groups for " + user + ": " + e.getMessage());
}
}
return Arrays.asList(groups);
}
@Override
public void cacheGroupsRefresh() throws IOException {
// does nothing in this provider of user to groups mapping
}
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
// does nothing in this provider of user to groups mapping
}
}
| 3,186 | 30.554455 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.LinkedList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.security.NetgroupCache;
/**
* A JNI-based implementation of {@link GroupMappingServiceProvider}
* that invokes libC calls to get the group
 * memberships (both unix groups and cached netgroups) of a given user.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class JniBasedUnixGroupsNetgroupMapping
extends JniBasedUnixGroupsMapping {
private static final Log LOG = LogFactory.getLog(
JniBasedUnixGroupsNetgroupMapping.class);
native String[] getUsersForNetgroupJNI(String group);
static {
if (!NativeCodeLoader.isNativeCodeLoaded()) {
throw new RuntimeException("Bailing out since native library couldn't " +
"be loaded");
}
LOG.debug("Using JniBasedUnixGroupsNetgroupMapping for Netgroup resolution");
}
/**
* Gets unix groups and netgroups for the user.
*
   * It gets all unix groups as returned by <code>id -Gn</code>, but it
   * only returns netgroups that are used in ACLs (there is
   * no way to enumerate all netgroups for a given user; see the
   * documentation for <code>getent netgroup</code>).
*/
@Override
public List<String> getGroups(String user) throws IOException {
// parent gets unix groups
List<String> groups = new LinkedList<String>(super.getGroups(user));
NetgroupCache.getNetgroups(user, groups);
return groups;
}
/**
* Refresh the netgroup cache
*/
@Override
public void cacheGroupsRefresh() throws IOException {
List<String> groups = NetgroupCache.getNetgroupNames();
NetgroupCache.clear();
cacheGroupsAdd(groups);
}
/**
* Add a group to cache, only netgroups are cached
*
* @param groups list of group names to add to cache
*/
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
for(String group: groups) {
if(group.length() == 0) {
// better safe than sorry (should never happen)
} else if(group.charAt(0) == '@') {
if(!NetgroupCache.isCached(group)) {
NetgroupCache.add(group, getUsersForNetgroup(group));
}
} else {
// unix group, not caching
}
}
}
/**
* Calls JNI function to get users for a netgroup, since C functions
* are not reentrant we need to make this synchronized (see
* documentation for setnetgrent, getnetgrent and endnetgrent)
*
* @param netgroup return users for this netgroup
* @return list of users for a given netgroup
*/
protected synchronized List<String> getUsersForNetgroup(String netgroup) {
String[] users = null;
try {
      // JNI code does not expect '@' at the beginning of the group name
users = getUsersForNetgroupJNI(netgroup.substring(1));
} catch (Exception e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Error getting users for netgroup " + netgroup, e);
} else {
LOG.info("Error getting users for netgroup " + netgroup +
": " + e.getMessage());
}
}
if (users != null && users.length != 0) {
return Arrays.asList(users);
}
return new LinkedList<String>();
}
}
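// Illustrative usage sketch, not part of the original file: how ACL
// processing typically seeds the cache that getGroups() consults. The
// netgroup name "@admins" is hypothetical and must exist in the system
// netgroup database (and the native hadoop library must be loadable) for
// the lookup to return members; plain unix group names are not cached.
class NetgroupMappingExample {
  public static void main(String[] args) throws IOException {
    JniBasedUnixGroupsNetgroupMapping mapping =
        new JniBasedUnixGroupsNetgroupMapping();
    mapping.cacheGroupsAdd(Arrays.asList("@admins"));
    // Returns the user's unix groups plus any cached netgroups containing her.
    System.out.println(mapping.getGroups("alice"));
  }
}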
| 4,273 | 31.625954 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.security.Provider;
import java.util.Map;
import javax.security.auth.callback.*;
import javax.security.sasl.AuthorizeCallback;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
import javax.security.sasl.SaslServerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class SaslPlainServer implements SaslServer {
@SuppressWarnings("serial")
public static class SecurityProvider extends Provider {
public SecurityProvider() {
super("SaslPlainServer", 1.0, "SASL PLAIN Authentication Server");
put("SaslServerFactory.PLAIN",
SaslPlainServerFactory.class.getName());
}
}
public static class SaslPlainServerFactory implements SaslServerFactory {
@Override
public SaslServer createSaslServer(String mechanism, String protocol,
String serverName, Map<String,?> props, CallbackHandler cbh)
throws SaslException {
return "PLAIN".equals(mechanism) ? new SaslPlainServer(cbh) : null;
}
@Override
public String[] getMechanismNames(Map<String,?> props){
return (props == null) || "false".equals(props.get(Sasl.POLICY_NOPLAINTEXT))
? new String[]{"PLAIN"}
: new String[0];
}
}
private CallbackHandler cbh;
private boolean completed;
private String authz;
SaslPlainServer(CallbackHandler callback) {
this.cbh = callback;
}
@Override
public String getMechanismName() {
return "PLAIN";
}
@Override
public byte[] evaluateResponse(byte[] response) throws SaslException {
if (completed) {
throw new IllegalStateException("PLAIN authentication has completed");
}
if (response == null) {
throw new IllegalArgumentException("Received null response");
}
try {
String payload;
try {
payload = new String(response, "UTF-8");
} catch (Exception e) {
throw new IllegalArgumentException("Received corrupt response", e);
}
// [ authz, authn, password ]
String[] parts = payload.split("\u0000", 3);
if (parts.length != 3) {
throw new IllegalArgumentException("Received corrupt response");
}
if (parts[0].isEmpty()) { // authz = authn
parts[0] = parts[1];
}
NameCallback nc = new NameCallback("SASL PLAIN");
nc.setName(parts[1]);
PasswordCallback pc = new PasswordCallback("SASL PLAIN", false);
pc.setPassword(parts[2].toCharArray());
AuthorizeCallback ac = new AuthorizeCallback(parts[1], parts[0]);
cbh.handle(new Callback[]{nc, pc, ac});
if (ac.isAuthorized()) {
authz = ac.getAuthorizedID();
}
} catch (Exception e) {
      throw new SaslException("PLAIN auth failed: " + e.getMessage(), e);
} finally {
completed = true;
}
return null;
}
private void throwIfNotComplete() {
if (!completed) {
throw new IllegalStateException("PLAIN authentication not completed");
}
}
@Override
public boolean isComplete() {
return completed;
}
@Override
public String getAuthorizationID() {
throwIfNotComplete();
return authz;
}
@Override
public Object getNegotiatedProperty(String propName) {
throwIfNotComplete();
return Sasl.QOP.equals(propName) ? "auth" : null;
}
@Override
public byte[] wrap(byte[] outgoing, int offset, int len)
throws SaslException {
throwIfNotComplete();
throw new IllegalStateException(
"PLAIN supports neither integrity nor privacy");
}
@Override
public byte[] unwrap(byte[] incoming, int offset, int len)
throws SaslException {
throwIfNotComplete();
throw new IllegalStateException(
"PLAIN supports neither integrity nor privacy");
}
@Override
public void dispose() throws SaslException {
cbh = null;
authz = null;
}
}
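// Illustrative usage sketch, not part of the original file: registering the
// provider above and running a single PLAIN exchange through the standard
// SASL API. The callback handler and credentials are hypothetical; a real
// handler would verify the PasswordCallback against a credential store
// before authorizing.
class SaslPlainExample {
  public static void main(String[] args) throws Exception {
    java.security.Security.addProvider(new SaslPlainServer.SecurityProvider());
    CallbackHandler cbh = new CallbackHandler() {
      @Override
      public void handle(Callback[] callbacks) {
        for (Callback cb : callbacks) {
          if (cb instanceof AuthorizeCallback) {
            ((AuthorizeCallback) cb).setAuthorized(true); // demo only
          }
        }
      }
    };
    SaslServer server =
        Sasl.createSaslServer("PLAIN", "demo", "localhost", null, cbh);
    // PLAIN wire format: authzid NUL authcid NUL password; an empty authzid
    // means "authorize as the authenticated user".
    server.evaluateResponse("\u0000alice\u0000secret".getBytes("UTF-8"));
    System.out.println("authorized as " + server.getAuthorizationID());
  }
}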
| 4,895 | 29.792453 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
/**
* This class implements parsing and handling of Kerberos principal names. In
* particular, it splits them apart and translates them down into local
* operating system names.
*/
@SuppressWarnings("all")
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class HadoopKerberosName extends KerberosName {
/**
* Create a name from the full Kerberos principal name.
   * @param name the full Kerberos principal name
*/
public HadoopKerberosName(String name) {
super(name);
}
/**
* Set the static configuration to get the rules.
* <p/>
* IMPORTANT: This method does a NOP if the rules have been set already.
* If there is a need to reset the rules, the {@link KerberosName#setRules(String)}
* method should be invoked directly.
*
* @param conf the new configuration
* @throws IOException
*/
public static void setConfiguration(Configuration conf) throws IOException {
final String defaultRule;
switch (SecurityUtil.getAuthenticationMethod(conf)) {
case KERBEROS:
case KERBEROS_SSL:
try {
KerberosUtil.getDefaultRealm();
} catch (Exception ke) {
throw new IllegalArgumentException("Can't get Kerberos realm", ke);
}
defaultRule = "DEFAULT";
break;
default:
// just extract the simple user name
defaultRule = "RULE:[1:$1] RULE:[2:$1]";
break;
}
String ruleString = conf.get(HADOOP_SECURITY_AUTH_TO_LOCAL, defaultRule);
setRules(ruleString);
}
public static void main(String[] args) throws Exception {
setConfiguration(new Configuration());
for(String arg: args) {
HadoopKerberosName name = new HadoopKerberosName(arg);
System.out.println("Name: " + name + " to " + name.getShortName());
}
}
}
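// Illustrative sketch, not part of the original file: what the default
// non-Kerberos rules chosen in setConfiguration above do to a two-component
// service principal (the realm is hypothetical).
class AuthToLocalExample {
  public static void main(String[] args) throws IOException {
    // "RULE:[1:$1] RULE:[2:$1]" keeps only the first principal component.
    HadoopKerberosName.setRules("RULE:[1:$1] RULE:[2:$1]");
    HadoopKerberosName name =
        new HadoopKerberosName("hdfs/nn.example.com@EXAMPLE.COM");
    System.out.println(name.getShortName()); // prints "hdfs"
  }
}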
| 3,076 | 34.77907 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Hashtable;
import java.util.List;
import javax.naming.CommunicationException;
import javax.naming.Context;
import javax.naming.NamingEnumeration;
import javax.naming.NamingException;
import javax.naming.directory.Attribute;
import javax.naming.directory.DirContext;
import javax.naming.directory.InitialDirContext;
import javax.naming.directory.SearchControls;
import javax.naming.directory.SearchResult;
import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
/**
* An implementation of {@link GroupMappingServiceProvider} which
* connects directly to an LDAP server for determining group membership.
*
* This provider should be used only if it is necessary to map users to
* groups that reside exclusively in an Active Directory or LDAP installation.
 * The common case for a Hadoop installation will be that LDAP users and groups
 * are materialized on the Unix servers, and for an installation like that,
* ShellBasedUnixGroupsMapping is preferred. However, in cases where
* those users and groups aren't materialized in Unix, but need to be used for
* access control, this class may be used to communicate directly with the LDAP
* server.
*
* It is important to note that resolving group mappings will incur network
* traffic, and may cause degraded performance, although user-group mappings
* will be cached via the infrastructure provided by {@link Groups}.
*
* This implementation does not support configurable search limits. If a filter
* is used for searching users or groups which returns more results than are
* allowed by the server, an exception will be thrown.
*
* The implementation also does not attempt to resolve group hierarchies. In
* order to be considered a member of a group, the user must be an explicit
* member in LDAP.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class LdapGroupsMapping
implements GroupMappingServiceProvider, Configurable {
public static final String LDAP_CONFIG_PREFIX = "hadoop.security.group.mapping.ldap";
/*
* URL of the LDAP server
*/
public static final String LDAP_URL_KEY = LDAP_CONFIG_PREFIX + ".url";
public static final String LDAP_URL_DEFAULT = "";
/*
* Should SSL be used to connect to the server
*/
public static final String LDAP_USE_SSL_KEY = LDAP_CONFIG_PREFIX + ".ssl";
public static final Boolean LDAP_USE_SSL_DEFAULT = false;
/*
* File path to the location of the SSL keystore to use
*/
public static final String LDAP_KEYSTORE_KEY = LDAP_CONFIG_PREFIX + ".ssl.keystore";
public static final String LDAP_KEYSTORE_DEFAULT = "";
/*
* Password for the keystore
*/
public static final String LDAP_KEYSTORE_PASSWORD_KEY = LDAP_CONFIG_PREFIX + ".ssl.keystore.password";
public static final String LDAP_KEYSTORE_PASSWORD_DEFAULT = "";
public static final String LDAP_KEYSTORE_PASSWORD_FILE_KEY = LDAP_KEYSTORE_PASSWORD_KEY + ".file";
public static final String LDAP_KEYSTORE_PASSWORD_FILE_DEFAULT = "";
/*
* User to bind to the LDAP server with
*/
public static final String BIND_USER_KEY = LDAP_CONFIG_PREFIX + ".bind.user";
public static final String BIND_USER_DEFAULT = "";
/*
* Password for the bind user
*/
public static final String BIND_PASSWORD_KEY = LDAP_CONFIG_PREFIX + ".bind.password";
public static final String BIND_PASSWORD_DEFAULT = "";
public static final String BIND_PASSWORD_FILE_KEY = BIND_PASSWORD_KEY + ".file";
public static final String BIND_PASSWORD_FILE_DEFAULT = "";
/*
* Base distinguished name to use for searches
*/
public static final String BASE_DN_KEY = LDAP_CONFIG_PREFIX + ".base";
public static final String BASE_DN_DEFAULT = "";
/*
* Any additional filters to apply when searching for users
*/
public static final String USER_SEARCH_FILTER_KEY = LDAP_CONFIG_PREFIX + ".search.filter.user";
public static final String USER_SEARCH_FILTER_DEFAULT = "(&(objectClass=user)(sAMAccountName={0}))";
/*
* Any additional filters to apply when finding relevant groups
*/
public static final String GROUP_SEARCH_FILTER_KEY = LDAP_CONFIG_PREFIX + ".search.filter.group";
public static final String GROUP_SEARCH_FILTER_DEFAULT = "(objectClass=group)";
/*
* LDAP attribute to use for determining group membership
*/
public static final String GROUP_MEMBERSHIP_ATTR_KEY = LDAP_CONFIG_PREFIX + ".search.attr.member";
public static final String GROUP_MEMBERSHIP_ATTR_DEFAULT = "member";
/*
* LDAP attribute to use for identifying a group's name
*/
public static final String GROUP_NAME_ATTR_KEY = LDAP_CONFIG_PREFIX + ".search.attr.group.name";
public static final String GROUP_NAME_ATTR_DEFAULT = "cn";
/*
* LDAP attribute names to use when doing posix-like lookups
*/
public static final String POSIX_UID_ATTR_KEY = LDAP_CONFIG_PREFIX + ".posix.attr.uid.name";
public static final String POSIX_UID_ATTR_DEFAULT = "uidNumber";
public static final String POSIX_GID_ATTR_KEY = LDAP_CONFIG_PREFIX + ".posix.attr.gid.name";
public static final String POSIX_GID_ATTR_DEFAULT = "gidNumber";
/*
* Posix attributes
*/
public static final String POSIX_GROUP = "posixGroup";
public static final String POSIX_ACCOUNT = "posixAccount";
/*
* LDAP {@link SearchControls} attribute to set the time limit
* for an invoked directory search. Prevents infinite wait cases.
*/
public static final String DIRECTORY_SEARCH_TIMEOUT =
LDAP_CONFIG_PREFIX + ".directory.search.timeout";
public static final int DIRECTORY_SEARCH_TIMEOUT_DEFAULT = 10000; // 10s
private static final Log LOG = LogFactory.getLog(LdapGroupsMapping.class);
private static final SearchControls SEARCH_CONTROLS = new SearchControls();
static {
SEARCH_CONTROLS.setSearchScope(SearchControls.SUBTREE_SCOPE);
}
private DirContext ctx;
private Configuration conf;
private String ldapUrl;
private boolean useSsl;
private String keystore;
private String keystorePass;
private String bindUser;
private String bindPassword;
private String baseDN;
private String groupSearchFilter;
private String userSearchFilter;
private String groupMemberAttr;
private String groupNameAttr;
private String posixUidAttr;
private String posixGidAttr;
private boolean isPosix;
public static int RECONNECT_RETRY_COUNT = 3;
/**
* Returns list of groups for a user.
*
* The LdapCtx which underlies the DirContext object is not thread-safe, so
* we need to block around this whole method. The caching infrastructure will
* ensure that performance stays in an acceptable range.
*
* @param user get groups for this user
* @return list of groups for a given user
*/
@Override
public synchronized List<String> getGroups(String user) throws IOException {
List<String> emptyResults = new ArrayList<String>();
    /*
     * Normal garbage collection takes care of removing Context instances
     * when they are no longer in use. Connections used by Context instances
     * being garbage collected will be closed automatically. So if the
     * connection is closed and we get a CommunicationException, retry a few
     * times with a new DirContext/connection.
     */
try {
return doGetGroups(user);
} catch (CommunicationException e) {
LOG.warn("Connection is closed, will try to reconnect");
} catch (NamingException e) {
LOG.warn("Exception trying to get groups for user " + user + ": "
+ e.getMessage());
return emptyResults;
}
int retryCount = 0;
while (retryCount ++ < RECONNECT_RETRY_COUNT) {
//reset ctx so that new DirContext can be created with new connection
this.ctx = null;
try {
return doGetGroups(user);
} catch (CommunicationException e) {
LOG.warn("Connection being closed, reconnecting failed, retryCount = " + retryCount);
} catch (NamingException e) {
LOG.warn("Exception trying to get groups for user " + user + ":"
+ e.getMessage());
return emptyResults;
}
}
return emptyResults;
}
List<String> doGetGroups(String user) throws NamingException {
List<String> groups = new ArrayList<String>();
DirContext ctx = getDirContext();
// Search for the user. We'll only ever need to look at the first result
NamingEnumeration<SearchResult> results = ctx.search(baseDN,
userSearchFilter,
new Object[]{user},
SEARCH_CONTROLS);
if (results.hasMoreElements()) {
SearchResult result = results.nextElement();
String userDn = result.getNameInNamespace();
NamingEnumeration<SearchResult> groupResults = null;
if (isPosix) {
String gidNumber = null;
String uidNumber = null;
Attribute gidAttribute = result.getAttributes().get(posixGidAttr);
Attribute uidAttribute = result.getAttributes().get(posixUidAttr);
if (gidAttribute != null) {
gidNumber = gidAttribute.get().toString();
}
if (uidAttribute != null) {
uidNumber = uidAttribute.get().toString();
}
if (uidNumber != null && gidNumber != null) {
groupResults =
ctx.search(baseDN,
"(&"+ groupSearchFilter + "(|(" + posixGidAttr + "={0})" +
"(" + groupMemberAttr + "={1})))",
new Object[] { gidNumber, uidNumber },
SEARCH_CONTROLS);
}
} else {
groupResults =
ctx.search(baseDN,
"(&" + groupSearchFilter + "(" + groupMemberAttr + "={0}))",
new Object[]{userDn},
SEARCH_CONTROLS);
}
if (groupResults != null) {
while (groupResults.hasMoreElements()) {
SearchResult groupResult = groupResults.nextElement();
Attribute groupName = groupResult.getAttributes().get(groupNameAttr);
groups.add(groupName.get().toString());
}
}
}
return groups;
}
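  // Worked example of the non-posix filter composed above: with the default
  // group search filter and member attribute it becomes
  //   (&(objectClass=group)(member={0}))
  // where {0} is bound to the DN of the user found by the first search.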
DirContext getDirContext() throws NamingException {
if (ctx == null) {
// Set up the initial environment for LDAP connectivity
Hashtable<String, String> env = new Hashtable<String, String>();
env.put(Context.INITIAL_CONTEXT_FACTORY,
com.sun.jndi.ldap.LdapCtxFactory.class.getName());
env.put(Context.PROVIDER_URL, ldapUrl);
env.put(Context.SECURITY_AUTHENTICATION, "simple");
// Set up SSL security, if necessary
if (useSsl) {
env.put(Context.SECURITY_PROTOCOL, "ssl");
System.setProperty("javax.net.ssl.keyStore", keystore);
System.setProperty("javax.net.ssl.keyStorePassword", keystorePass);
}
env.put(Context.SECURITY_PRINCIPAL, bindUser);
env.put(Context.SECURITY_CREDENTIALS, bindPassword);
ctx = new InitialDirContext(env);
}
return ctx;
}
/**
* Caches groups, no need to do that for this provider
*/
@Override
public void cacheGroupsRefresh() throws IOException {
// does nothing in this provider of user to groups mapping
}
/**
* Adds groups to cache, no need to do that for this provider
*
* @param groups unused
*/
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
// does nothing in this provider of user to groups mapping
}
@Override
public synchronized Configuration getConf() {
return conf;
}
@Override
public synchronized void setConf(Configuration conf) {
ldapUrl = conf.get(LDAP_URL_KEY, LDAP_URL_DEFAULT);
if (ldapUrl == null || ldapUrl.isEmpty()) {
throw new RuntimeException("LDAP URL is not configured");
}
useSsl = conf.getBoolean(LDAP_USE_SSL_KEY, LDAP_USE_SSL_DEFAULT);
keystore = conf.get(LDAP_KEYSTORE_KEY, LDAP_KEYSTORE_DEFAULT);
keystorePass = getPassword(conf, LDAP_KEYSTORE_PASSWORD_KEY,
LDAP_KEYSTORE_PASSWORD_DEFAULT);
if (keystorePass.isEmpty()) {
keystorePass = extractPassword(conf.get(LDAP_KEYSTORE_PASSWORD_FILE_KEY,
LDAP_KEYSTORE_PASSWORD_FILE_DEFAULT));
}
bindUser = conf.get(BIND_USER_KEY, BIND_USER_DEFAULT);
bindPassword = getPassword(conf, BIND_PASSWORD_KEY, BIND_PASSWORD_DEFAULT);
if (bindPassword.isEmpty()) {
bindPassword = extractPassword(
conf.get(BIND_PASSWORD_FILE_KEY, BIND_PASSWORD_FILE_DEFAULT));
}
baseDN = conf.get(BASE_DN_KEY, BASE_DN_DEFAULT);
groupSearchFilter =
conf.get(GROUP_SEARCH_FILTER_KEY, GROUP_SEARCH_FILTER_DEFAULT);
userSearchFilter =
conf.get(USER_SEARCH_FILTER_KEY, USER_SEARCH_FILTER_DEFAULT);
isPosix = groupSearchFilter.contains(POSIX_GROUP) && userSearchFilter
.contains(POSIX_ACCOUNT);
groupMemberAttr =
conf.get(GROUP_MEMBERSHIP_ATTR_KEY, GROUP_MEMBERSHIP_ATTR_DEFAULT);
groupNameAttr =
conf.get(GROUP_NAME_ATTR_KEY, GROUP_NAME_ATTR_DEFAULT);
posixUidAttr =
conf.get(POSIX_UID_ATTR_KEY, POSIX_UID_ATTR_DEFAULT);
posixGidAttr =
conf.get(POSIX_GID_ATTR_KEY, POSIX_GID_ATTR_DEFAULT);
int dirSearchTimeout = conf.getInt(DIRECTORY_SEARCH_TIMEOUT, DIRECTORY_SEARCH_TIMEOUT_DEFAULT);
SEARCH_CONTROLS.setTimeLimit(dirSearchTimeout);
// Limit the attributes returned to only those required to speed up the search.
// See HADOOP-10626 and HADOOP-12001 for more details.
SEARCH_CONTROLS.setReturningAttributes(
new String[] {groupNameAttr, posixUidAttr, posixGidAttr});
this.conf = conf;
}
String getPassword(Configuration conf, String alias, String defaultPass) {
String password = null;
try {
char[] passchars = conf.getPassword(alias);
if (passchars != null) {
password = new String(passchars);
}
else {
password = defaultPass;
}
}
catch (IOException ioe) {
LOG.warn("Exception while trying to password for alias " + alias + ": "
+ ioe.getMessage());
}
return password;
}
String extractPassword(String pwFile) {
if (pwFile.isEmpty()) {
// If there is no password file defined, we'll assume that we should do
// an anonymous bind
return "";
}
StringBuilder password = new StringBuilder();
try (Reader reader = new InputStreamReader(
new FileInputStream(pwFile), Charsets.UTF_8)) {
int c = reader.read();
while (c > -1) {
password.append((char)c);
c = reader.read();
}
return password.toString().trim();
} catch (IOException ioe) {
throw new RuntimeException("Could not read password file: " + pwFile, ioe);
}
}
}
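// Illustrative usage sketch, not part of the original file: the minimal
// wiring for a direct-to-LDAP lookup. The server address, bind DN, and base
// DN are hypothetical, and actually running this requires a reachable LDAP
// server.
class LdapGroupsExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://ldap.example.com:389");
    conf.set(LdapGroupsMapping.BIND_USER_KEY, "cn=reader,dc=example,dc=com");
    conf.set(LdapGroupsMapping.BIND_PASSWORD_KEY, "secret");
    conf.set(LdapGroupsMapping.BASE_DN_KEY, "dc=example,dc=com");
    LdapGroupsMapping mapping = new LdapGroupsMapping();
    mapping.setConf(conf); // throws if the LDAP URL is not configured
    System.out.println(mapping.getGroups("alice"));
  }
}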
| 16,024 | 35.420455 | 125 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/User.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.security.Principal;
import javax.security.auth.login.LoginContext;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
/**
* Save the full and short name of the user as a principal. This allows us to
* have a single type that we always look for when picking up user names.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
class User implements Principal {
private final String fullName;
private final String shortName;
private volatile AuthenticationMethod authMethod = null;
private volatile LoginContext login = null;
private volatile long lastLogin = 0;
public User(String name) {
this(name, null, null);
}
public User(String name, AuthenticationMethod authMethod, LoginContext login) {
try {
shortName = new HadoopKerberosName(name).getShortName();
} catch (IOException ioe) {
throw new IllegalArgumentException("Illegal principal name " + name
+": " + ioe.toString(), ioe);
}
fullName = name;
this.authMethod = authMethod;
this.login = login;
}
/**
* Get the full name of the user.
*/
@Override
public String getName() {
return fullName;
}
/**
* Get the user name up to the first '/' or '@'
* @return the leading part of the user name
*/
public String getShortName() {
return shortName;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (o == null || getClass() != o.getClass()) {
return false;
} else {
return ((fullName.equals(((User) o).fullName)) && (authMethod == ((User) o).authMethod));
}
}
@Override
public int hashCode() {
return fullName.hashCode();
}
@Override
public String toString() {
return fullName;
}
public void setAuthenticationMethod(AuthenticationMethod authMethod) {
this.authMethod = authMethod;
}
public AuthenticationMethod getAuthenticationMethod() {
return authMethod;
}
/**
* Returns login object
* @return login
*/
public LoginContext getLogin() {
return login;
}
/**
* Set the login object
* @param login
*/
public void setLogin(LoginContext login) {
this.login = login;
}
/**
* Set the last login time.
   * @param time the login time, in milliseconds since the epoch
*/
public void setLastLogin(long time) {
lastLogin = time;
}
/**
* Get the time of the last login.
   * @return the last login time, in milliseconds since the epoch.
*/
public long getLastLogin() {
return lastLogin;
}
}
| 3,657 | 25.897059 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.security.AccessControlContext;
import java.security.AccessController;
import java.security.Principal;
import java.security.PrivilegedAction;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.security.auth.Subject;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.kerberos.KerberosTicket;
import javax.security.auth.kerberos.KeyTab;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;
import javax.security.auth.spi.LoginModule;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
/**
* User and group information for Hadoop.
* This class wraps around a JAAS Subject and provides methods to determine the
 * user's username and groups. It supports the Windows, Unix, and Kerberos
 * login modules.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "HBase", "Hive", "Oozie"})
@InterfaceStability.Evolving
public class UserGroupInformation {
private static final Log LOG = LogFactory.getLog(UserGroupInformation.class);
/**
* Percentage of the ticket window to use before we renew ticket.
*/
private static final float TICKET_RENEW_WINDOW = 0.80f;
private static boolean shouldRenewImmediatelyForTests = false;
static final String HADOOP_USER_NAME = "HADOOP_USER_NAME";
static final String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
/**
* For the purposes of unit tests, we want to test login
* from keytab and don't want to wait until the renew
* window (controlled by TICKET_RENEW_WINDOW).
* @param immediate true if we should login without waiting for ticket window
*/
@VisibleForTesting
static void setShouldRenewImmediatelyForTests(boolean immediate) {
shouldRenewImmediatelyForTests = immediate;
}
/**
* UgiMetrics maintains UGI activity statistics
* and publishes them through the metrics interfaces.
*/
@Metrics(about="User and group related metrics", context="ugi")
static class UgiMetrics {
final MetricsRegistry registry = new MetricsRegistry("UgiMetrics");
@Metric("Rate of successful kerberos logins and latency (milliseconds)")
MutableRate loginSuccess;
@Metric("Rate of failed kerberos logins and latency (milliseconds)")
MutableRate loginFailure;
@Metric("GetGroups") MutableRate getGroups;
MutableQuantiles[] getGroupsQuantiles;
static UgiMetrics create() {
return DefaultMetricsSystem.instance().register(new UgiMetrics());
}
void addGetGroups(long latency) {
getGroups.add(latency);
if (getGroupsQuantiles != null) {
for (MutableQuantiles q : getGroupsQuantiles) {
q.add(latency);
}
}
}
}
/**
* A login module that looks at the Kerberos, Unix, or Windows principal and
   * adds the corresponding User principal.
*/
@InterfaceAudience.Private
public static class HadoopLoginModule implements LoginModule {
private Subject subject;
@Override
public boolean abort() throws LoginException {
return true;
}
private <T extends Principal> T getCanonicalUser(Class<T> cls) {
for(T user: subject.getPrincipals(cls)) {
return user;
}
return null;
}
@Override
public boolean commit() throws LoginException {
if (LOG.isDebugEnabled()) {
LOG.debug("hadoop login commit");
}
// if we already have a user, we are done.
if (!subject.getPrincipals(User.class).isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("using existing subject:"+subject.getPrincipals());
}
return true;
}
Principal user = null;
// if we are using kerberos, try it out
if (isAuthenticationMethodEnabled(AuthenticationMethod.KERBEROS)) {
user = getCanonicalUser(KerberosPrincipal.class);
if (LOG.isDebugEnabled()) {
LOG.debug("using kerberos user:"+user);
}
}
//If we don't have a kerberos user and security is disabled, check
//if user is specified in the environment or properties
if (!isSecurityEnabled() && (user == null)) {
String envUser = System.getenv(HADOOP_USER_NAME);
if (envUser == null) {
envUser = System.getProperty(HADOOP_USER_NAME);
}
user = envUser == null ? null : new User(envUser);
}
// use the OS user
if (user == null) {
user = getCanonicalUser(OS_PRINCIPAL_CLASS);
if (LOG.isDebugEnabled()) {
LOG.debug("using local user:"+user);
}
}
// if we found the user, add our principal
if (user != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Using user: \"" + user + "\" with name " + user.getName());
}
User userEntry = null;
try {
userEntry = new User(user.getName());
} catch (Exception e) {
throw (LoginException)(new LoginException(e.toString()).initCause(e));
}
if (LOG.isDebugEnabled()) {
LOG.debug("User entry: \"" + userEntry.toString() + "\"" );
}
subject.getPrincipals().add(userEntry);
return true;
}
LOG.error("Can't find user in " + subject);
throw new LoginException("Can't find user name");
}
@Override
public void initialize(Subject subject, CallbackHandler callbackHandler,
Map<String, ?> sharedState, Map<String, ?> options) {
this.subject = subject;
}
@Override
public boolean login() throws LoginException {
if (LOG.isDebugEnabled()) {
LOG.debug("hadoop login");
}
return true;
}
@Override
public boolean logout() throws LoginException {
if (LOG.isDebugEnabled()) {
LOG.debug("hadoop logout");
}
return true;
}
}
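  // Illustrative note on the fallback order in commit() above: with security
  // disabled, running e.g.
  //   HADOOP_USER_NAME=alice hadoop fs -ls /
  // executes as "alice" regardless of the OS login, while a Kerberos
  // principal (when that method is enabled) always takes precedence.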
/** Metrics to track UGI activity */
static UgiMetrics metrics = UgiMetrics.create();
/** The auth method to use */
private static AuthenticationMethod authenticationMethod;
/** Server-side groups fetching service */
private static Groups groups;
/** The configuration to use */
private static Configuration conf;
/** Leave 10 minutes between relogin attempts. */
private static final long MIN_TIME_BEFORE_RELOGIN = 10 * 60 * 1000L;
/**Environment variable pointing to the token cache file*/
public static final String HADOOP_TOKEN_FILE_LOCATION =
"HADOOP_TOKEN_FILE_LOCATION";
/**
* A method to initialize the fields that depend on a configuration.
   * Must be called before authenticationMethod or groups is used.
*/
private static void ensureInitialized() {
if (conf == null) {
synchronized(UserGroupInformation.class) {
if (conf == null) { // someone might have beat us
initialize(new Configuration(), false);
}
}
}
}
/**
* Initialize UGI and related classes.
* @param conf the configuration to use
*/
private static synchronized void initialize(Configuration conf,
boolean overrideNameRules) {
authenticationMethod = SecurityUtil.getAuthenticationMethod(conf);
if (overrideNameRules || !HadoopKerberosName.hasRulesBeenSet()) {
try {
HadoopKerberosName.setConfiguration(conf);
} catch (IOException ioe) {
throw new RuntimeException(
"Problem with Kerberos auth_to_local name configuration", ioe);
}
}
// If we haven't set up testing groups, use the configuration to find it
if (!(groups instanceof TestingGroups)) {
groups = Groups.getUserToGroupsMappingService(conf);
}
UserGroupInformation.conf = conf;
if (metrics.getGroupsQuantiles == null) {
int[] intervals = conf.getInts(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS);
if (intervals != null && intervals.length > 0) {
final int length = intervals.length;
MutableQuantiles[] getGroupsQuantiles = new MutableQuantiles[length];
for (int i = 0; i < length; i++) {
getGroupsQuantiles[i] = metrics.registry.newQuantiles(
"getGroups" + intervals[i] + "s",
"Get groups", "ops", "latency", intervals[i]);
}
metrics.getGroupsQuantiles = getGroupsQuantiles;
}
}
}
/**
* Set the static configuration for UGI.
* In particular, set the security authentication mechanism and the
* group look up service.
* @param conf the configuration to use
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void setConfiguration(Configuration conf) {
initialize(conf, true);
}
@InterfaceAudience.Private
@VisibleForTesting
static void reset() {
authenticationMethod = null;
conf = null;
groups = null;
setLoginUser(null);
HadoopKerberosName.setRules(null);
}
/**
* Determine if UserGroupInformation is using Kerberos to determine
* user identities or is relying on simple authentication
*
* @return true if UGI is working in a secure environment
*/
public static boolean isSecurityEnabled() {
return !isAuthenticationMethodEnabled(AuthenticationMethod.SIMPLE);
}
@InterfaceAudience.Private
@InterfaceStability.Evolving
private static boolean isAuthenticationMethodEnabled(AuthenticationMethod method) {
ensureInitialized();
return (authenticationMethod == method);
}
/**
* Information about the logged in user.
*/
private static UserGroupInformation loginUser = null;
private static String keytabPrincipal = null;
private static String keytabFile = null;
private final Subject subject;
// All non-static fields must be read-only caches that come from the subject.
private final User user;
private final boolean isKeytab;
private final boolean isKrbTkt;
private static String OS_LOGIN_MODULE_NAME;
private static Class<? extends Principal> OS_PRINCIPAL_CLASS;
private static final boolean windows =
System.getProperty("os.name").startsWith("Windows");
private static final boolean is64Bit =
System.getProperty("os.arch").contains("64") ||
System.getProperty("os.arch").contains("s390x");
private static final boolean aix = System.getProperty("os.name").equals("AIX");
/* Return the OS login module class name */
private static String getOSLoginModuleName() {
if (IBM_JAVA) {
if (windows) {
return is64Bit ? "com.ibm.security.auth.module.Win64LoginModule"
: "com.ibm.security.auth.module.NTLoginModule";
} else if (aix) {
return is64Bit ? "com.ibm.security.auth.module.AIX64LoginModule"
: "com.ibm.security.auth.module.AIXLoginModule";
} else {
return "com.ibm.security.auth.module.LinuxLoginModule";
}
} else {
return windows ? "com.sun.security.auth.module.NTLoginModule"
: "com.sun.security.auth.module.UnixLoginModule";
}
}
/* Return the OS principal class */
@SuppressWarnings("unchecked")
private static Class<? extends Principal> getOsPrincipalClass() {
ClassLoader cl = ClassLoader.getSystemClassLoader();
try {
String principalClass = null;
if (IBM_JAVA) {
if (is64Bit) {
principalClass = "com.ibm.security.auth.UsernamePrincipal";
} else {
if (windows) {
principalClass = "com.ibm.security.auth.NTUserPrincipal";
} else if (aix) {
principalClass = "com.ibm.security.auth.AIXPrincipal";
} else {
principalClass = "com.ibm.security.auth.LinuxPrincipal";
}
}
} else {
principalClass = windows ? "com.sun.security.auth.NTUserPrincipal"
: "com.sun.security.auth.UnixPrincipal";
}
return (Class<? extends Principal>) cl.loadClass(principalClass);
} catch (ClassNotFoundException e) {
LOG.error("Unable to find JAAS classes:" + e.getMessage());
}
return null;
}
static {
OS_LOGIN_MODULE_NAME = getOSLoginModuleName();
OS_PRINCIPAL_CLASS = getOsPrincipalClass();
}
private static class RealUser implements Principal {
private final UserGroupInformation realUser;
RealUser(UserGroupInformation realUser) {
this.realUser = realUser;
}
@Override
public String getName() {
return realUser.getUserName();
}
public UserGroupInformation getRealUser() {
return realUser;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (o == null || getClass() != o.getClass()) {
return false;
} else {
return realUser.equals(((RealUser) o).realUser);
}
}
@Override
public int hashCode() {
return realUser.hashCode();
}
@Override
public String toString() {
return realUser.toString();
}
}
/**
* A JAAS configuration that defines the login modules that we want
* to use for login.
*/
private static class HadoopConfiguration
extends javax.security.auth.login.Configuration {
private static final String SIMPLE_CONFIG_NAME = "hadoop-simple";
private static final String USER_KERBEROS_CONFIG_NAME =
"hadoop-user-kerberos";
private static final String KEYTAB_KERBEROS_CONFIG_NAME =
"hadoop-keytab-kerberos";
private static final Map<String, String> BASIC_JAAS_OPTIONS =
new HashMap<String,String>();
static {
String jaasEnvVar = System.getenv("HADOOP_JAAS_DEBUG");
if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar)) {
BASIC_JAAS_OPTIONS.put("debug", "true");
}
}
private static final AppConfigurationEntry OS_SPECIFIC_LOGIN =
new AppConfigurationEntry(OS_LOGIN_MODULE_NAME,
LoginModuleControlFlag.REQUIRED,
BASIC_JAAS_OPTIONS);
private static final AppConfigurationEntry HADOOP_LOGIN =
new AppConfigurationEntry(HadoopLoginModule.class.getName(),
LoginModuleControlFlag.REQUIRED,
BASIC_JAAS_OPTIONS);
private static final Map<String,String> USER_KERBEROS_OPTIONS =
new HashMap<String,String>();
static {
if (IBM_JAVA) {
USER_KERBEROS_OPTIONS.put("useDefaultCcache", "true");
} else {
USER_KERBEROS_OPTIONS.put("doNotPrompt", "true");
USER_KERBEROS_OPTIONS.put("useTicketCache", "true");
}
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
if (IBM_JAVA) {
// The first value searched when "useDefaultCcache" is used.
System.setProperty("KRB5CCNAME", ticketCache);
} else {
USER_KERBEROS_OPTIONS.put("ticketCache", ticketCache);
}
}
USER_KERBEROS_OPTIONS.put("renewTGT", "true");
USER_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);
}
private static final AppConfigurationEntry USER_KERBEROS_LOGIN =
new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
LoginModuleControlFlag.OPTIONAL,
USER_KERBEROS_OPTIONS);
private static final Map<String,String> KEYTAB_KERBEROS_OPTIONS =
new HashMap<String,String>();
static {
if (IBM_JAVA) {
KEYTAB_KERBEROS_OPTIONS.put("credsType", "both");
} else {
KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true");
KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true");
KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true");
}
KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true");
KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);
}
private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN =
new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
LoginModuleControlFlag.REQUIRED,
KEYTAB_KERBEROS_OPTIONS);
private static final AppConfigurationEntry[] SIMPLE_CONF =
new AppConfigurationEntry[]{OS_SPECIFIC_LOGIN, HADOOP_LOGIN};
private static final AppConfigurationEntry[] USER_KERBEROS_CONF =
new AppConfigurationEntry[]{OS_SPECIFIC_LOGIN, USER_KERBEROS_LOGIN,
HADOOP_LOGIN};
private static final AppConfigurationEntry[] KEYTAB_KERBEROS_CONF =
new AppConfigurationEntry[]{KEYTAB_KERBEROS_LOGIN, HADOOP_LOGIN};
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String appName) {
if (SIMPLE_CONFIG_NAME.equals(appName)) {
return SIMPLE_CONF;
} else if (USER_KERBEROS_CONFIG_NAME.equals(appName)) {
return USER_KERBEROS_CONF;
} else if (KEYTAB_KERBEROS_CONFIG_NAME.equals(appName)) {
if (IBM_JAVA) {
KEYTAB_KERBEROS_OPTIONS.put("useKeytab",
prependFileAuthority(keytabFile));
} else {
KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile);
}
KEYTAB_KERBEROS_OPTIONS.put("principal", keytabPrincipal);
return KEYTAB_KERBEROS_CONF;
}
return null;
}
}
private static String prependFileAuthority(String keytabPath) {
return keytabPath.startsWith("file://") ? keytabPath
: "file://" + keytabPath;
}
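  // Worked example for the helper above:
  //   prependFileAuthority("/etc/krb5.keytab") -> "file:///etc/krb5.keytab"
  // while an already qualified "file:///etc/krb5.keytab" passes through.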
/**
* Represents a javax.security configuration that is created at runtime.
*/
private static class DynamicConfiguration
extends javax.security.auth.login.Configuration {
private AppConfigurationEntry[] ace;
DynamicConfiguration(AppConfigurationEntry[] ace) {
this.ace = ace;
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String appName) {
return ace;
}
}
private static LoginContext
newLoginContext(String appName, Subject subject,
javax.security.auth.login.Configuration loginConf)
throws LoginException {
// Temporarily switch the thread's ContextClassLoader to match this
// class's classloader, so that we can properly load HadoopLoginModule
// from the JAAS libraries.
Thread t = Thread.currentThread();
ClassLoader oldCCL = t.getContextClassLoader();
t.setContextClassLoader(HadoopLoginModule.class.getClassLoader());
try {
return new LoginContext(appName, subject, null, loginConf);
} finally {
t.setContextClassLoader(oldCCL);
}
}
private LoginContext getLogin() {
return user.getLogin();
}
private void setLogin(LoginContext login) {
user.setLogin(login);
}
/**
* Create a UserGroupInformation for the given subject.
* This does not change the subject or acquire new credentials.
* @param subject the user's subject
*/
UserGroupInformation(Subject subject) {
this.subject = subject;
this.user = subject.getPrincipals(User.class).iterator().next();
this.isKeytab = !subject.getPrivateCredentials(KeyTab.class).isEmpty();
this.isKrbTkt = !subject.getPrivateCredentials(KerberosTicket.class).isEmpty();
}
/**
   * Checks if the user logged in using kerberos.
   * @return true if the subject logged in via keytab or has a Kerberos TGT
*/
public boolean hasKerberosCredentials() {
return isKeytab || isKrbTkt;
}
/**
* Return the current user, including any doAs in the current stack.
* @return the current user
* @throws IOException if login fails
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public synchronized
static UserGroupInformation getCurrentUser() throws IOException {
AccessControlContext context = AccessController.getContext();
Subject subject = Subject.getSubject(context);
if (subject == null || subject.getPrincipals(User.class).isEmpty()) {
return getLoginUser();
} else {
return new UserGroupInformation(subject);
}
}
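  // Illustrative sketch, not part of the original class (hypothetical user
  // name): inside doAs, the Subject installed on the access-control context
  // is what getCurrentUser() picks up.
  private static String demoDoAsCurrentUser()
      throws IOException, InterruptedException {
    UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");
    return alice.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws IOException {
        return getCurrentUser().getUserName(); // resolves to "alice"
      }
    });
  }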
/**
* Find the most appropriate UserGroupInformation to use
*
   * @param ticketCachePath The Kerberos ticket cache path, or NULL
   * if none is specified
* @param user The user name, or NULL if none is specified.
*
* @return The most appropriate UserGroupInformation
*/
public static UserGroupInformation getBestUGI(
String ticketCachePath, String user) throws IOException {
if (ticketCachePath != null) {
return getUGIFromTicketCache(ticketCachePath, user);
} else if (user == null) {
return getCurrentUser();
} else {
return createRemoteUser(user);
}
}
/**
* Create a UserGroupInformation from a Kerberos ticket cache.
*
   * @param ticketCache the path to the ticket cache file
   * @param user The principal name to load from the ticket
   * cache
*
* @throws IOException if the kerberos login fails
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getUGIFromTicketCache(
String ticketCache, String user) throws IOException {
if (!isAuthenticationMethodEnabled(AuthenticationMethod.KERBEROS)) {
return getBestUGI(null, user);
}
try {
Map<String,String> krbOptions = new HashMap<String,String>();
if (IBM_JAVA) {
krbOptions.put("useDefaultCcache", "true");
// The first value searched when "useDefaultCcache" is used.
System.setProperty("KRB5CCNAME", ticketCache);
} else {
krbOptions.put("doNotPrompt", "true");
krbOptions.put("useTicketCache", "true");
krbOptions.put("useKeyTab", "false");
krbOptions.put("ticketCache", ticketCache);
}
krbOptions.put("renewTGT", "false");
krbOptions.putAll(HadoopConfiguration.BASIC_JAAS_OPTIONS);
AppConfigurationEntry ace = new AppConfigurationEntry(
KerberosUtil.getKrb5LoginModuleName(),
LoginModuleControlFlag.REQUIRED,
krbOptions);
DynamicConfiguration dynConf =
new DynamicConfiguration(new AppConfigurationEntry[]{ ace });
LoginContext login = newLoginContext(
HadoopConfiguration.USER_KERBEROS_CONFIG_NAME, null, dynConf);
login.login();
Subject loginSubject = login.getSubject();
Set<Principal> loginPrincipals = loginSubject.getPrincipals();
if (loginPrincipals.isEmpty()) {
throw new RuntimeException("No login principals found!");
}
if (loginPrincipals.size() != 1) {
LOG.warn("found more than one principal in the ticket cache file " +
ticketCache);
}
User ugiUser = new User(loginPrincipals.iterator().next().getName(),
AuthenticationMethod.KERBEROS, login);
loginSubject.getPrincipals().add(ugiUser);
UserGroupInformation ugi = new UserGroupInformation(loginSubject);
ugi.setLogin(login);
ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
return ugi;
} catch (LoginException le) {
throw new IOException("failure to login using ticket cache file " +
ticketCache, le);
}
}
/**
* Create a UserGroupInformation from a Subject with Kerberos principal.
*
   * @param subject the Subject containing the KerberosPrincipal to use in the UGI
*
* @throws IOException if the kerberos login fails
*/
public static UserGroupInformation getUGIFromSubject(Subject subject)
throws IOException {
if (subject == null) {
throw new IOException("Subject must not be null");
}
if (subject.getPrincipals(KerberosPrincipal.class).isEmpty()) {
throw new IOException("Provided Subject must contain a KerberosPrincipal");
}
KerberosPrincipal principal =
subject.getPrincipals(KerberosPrincipal.class).iterator().next();
User ugiUser = new User(principal.getName(),
AuthenticationMethod.KERBEROS, null);
subject.getPrincipals().add(ugiUser);
UserGroupInformation ugi = new UserGroupInformation(subject);
ugi.setLogin(null);
ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
return ugi;
}
/**
* Get the currently logged in user.
* @return the logged in user
* @throws IOException if login fails
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public synchronized
static UserGroupInformation getLoginUser() throws IOException {
if (loginUser == null) {
loginUserFromSubject(null);
}
return loginUser;
}
/**
   * Remove the login method suffix (preceded by a space) from the user name,
   * e.g. "jack (auth:SIMPLE)" -> "jack".
   *
   * @param userName the user name, possibly followed by a login method suffix
   * @return userName without the login method
*/
public static String trimLoginMethod(String userName) {
int spaceIndex = userName.indexOf(' ');
if (spaceIndex >= 0) {
userName = userName.substring(0, spaceIndex);
}
return userName;
}
/**
   * Log in a user using the given subject.
   * @param subject the subject to use when logging in a user, or null to
   * create a new subject.
* @throws IOException if login fails
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public synchronized
static void loginUserFromSubject(Subject subject) throws IOException {
ensureInitialized();
try {
if (subject == null) {
subject = new Subject();
}
LoginContext login =
newLoginContext(authenticationMethod.getLoginAppName(),
subject, new HadoopConfiguration());
login.login();
UserGroupInformation realUser = new UserGroupInformation(subject);
realUser.setLogin(login);
realUser.setAuthenticationMethod(authenticationMethod);
realUser = new UserGroupInformation(login.getSubject());
// If the HADOOP_PROXY_USER environment variable or property
// is specified, create a proxy user as the logged in user.
String proxyUser = System.getenv(HADOOP_PROXY_USER);
if (proxyUser == null) {
proxyUser = System.getProperty(HADOOP_PROXY_USER);
}
loginUser = proxyUser == null ? realUser : createProxyUser(proxyUser, realUser);
String fileLocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
if (fileLocation != null) {
// Load the token storage file and put all of the tokens into the
// user. Don't use the FileSystem API for reading since it has a lock
// cycle (HADOOP-9212).
Credentials cred = Credentials.readTokenStorageFile(
new File(fileLocation), conf);
loginUser.addCredentials(cred);
}
loginUser.spawnAutoRenewalThreadForUserCreds();
} catch (LoginException le) {
LOG.debug("failure to login", le);
throw new IOException("failure to login", le);
}
if (LOG.isDebugEnabled()) {
LOG.debug("UGI loginUser:"+loginUser);
}
}
@InterfaceAudience.Private
@InterfaceStability.Unstable
@VisibleForTesting
public synchronized static void setLoginUser(UserGroupInformation ugi) {
// if this is to become stable, should probably logout the currently
// logged in ugi if it's different
loginUser = ugi;
}
/**
* Is this user logged in from a keytab file?
* @return true if the credentials are from a keytab file.
*/
public boolean isFromKeytab() {
return isKeytab;
}
/**
* Get the Kerberos TGT
* @return the user's TGT or null if none was found
*/
private synchronized KerberosTicket getTGT() {
Set<KerberosTicket> tickets = subject
.getPrivateCredentials(KerberosTicket.class);
for (KerberosTicket ticket : tickets) {
if (SecurityUtil.isOriginalTGT(ticket)) {
return ticket;
}
}
return null;
}
private long getRefreshTime(KerberosTicket tgt) {
long start = tgt.getStartTime().getTime();
long end = tgt.getEndTime().getTime();
return start + (long) ((end - start) * TICKET_RENEW_WINDOW);
}
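  // Worked example for the refresh window above (illustrative numbers): a
  // TGT issued with a 10 hour lifetime has end - start = 36,000,000 ms, so
  // with TICKET_RENEW_WINDOW = 0.80 the refresh time falls at
  // start + 28,800,000 ms, i.e. after 8 of the 10 hours have elapsed.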
/**Spawn a thread to do periodic renewals of kerberos credentials*/
private void spawnAutoRenewalThreadForUserCreds() {
if (isSecurityEnabled()) {
//spawn thread only if we have kerb credentials
if (user.getAuthenticationMethod() == AuthenticationMethod.KERBEROS &&
!isKeytab) {
Thread t = new Thread(new Runnable() {
@Override
public void run() {
String cmd = conf.get("hadoop.kerberos.kinit.command",
"kinit");
KerberosTicket tgt = getTGT();
if (tgt == null) {
return;
}
long nextRefresh = getRefreshTime(tgt);
while (true) {
try {
long now = Time.now();
if(LOG.isDebugEnabled()) {
LOG.debug("Current time is " + now);
LOG.debug("Next refresh is " + nextRefresh);
}
if (now < nextRefresh) {
Thread.sleep(nextRefresh - now);
}
Shell.execCommand(cmd, "-R");
if(LOG.isDebugEnabled()) {
LOG.debug("renewed ticket");
}
reloginFromTicketCache();
tgt = getTGT();
if (tgt == null) {
LOG.warn("No TGT after renewal. Aborting renew thread for " +
getUserName());
return;
}
nextRefresh = Math.max(getRefreshTime(tgt),
now + MIN_TIME_BEFORE_RELOGIN);
} catch (InterruptedException ie) {
LOG.warn("Terminating renewal thread");
return;
} catch (IOException ie) {
LOG.warn("Exception encountered while running the" +
" renewal command. Aborting renew thread. " + ie);
return;
}
}
}
});
t.setDaemon(true);
t.setName("TGT Renewer for " + getUserName());
t.start();
}
}
}
/**
* Log a user in from a keytab file. Loads a user identity from a keytab
* file and logs them in. They become the currently logged-in user.
* @param user the principal name to load from the keytab
* @param path the path to the keytab file
* @throws IOException if the keytab file can't be read
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public synchronized
static void loginUserFromKeytab(String user,
String path
) throws IOException {
    if (!isSecurityEnabled()) {
      return;
    }
keytabFile = path;
keytabPrincipal = user;
Subject subject = new Subject();
LoginContext login;
long start = 0;
try {
login = newLoginContext(HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME,
subject, new HadoopConfiguration());
start = Time.now();
login.login();
metrics.loginSuccess.add(Time.now() - start);
loginUser = new UserGroupInformation(subject);
loginUser.setLogin(login);
loginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
} catch (LoginException le) {
if (start > 0) {
metrics.loginFailure.add(Time.now() - start);
}
throw new IOException("Login failure for " + user + " from keytab " +
path+ ": " + le, le);
}
LOG.info("Login successful for user " + keytabPrincipal
+ " using keytab file " + keytabFile);
}
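  /*
   * Illustrative usage sketch (assumes a Kerberos-enabled configuration; the
   * principal and keytab path below are hypothetical):
   *
   *   Configuration conf = new Configuration();
   *   conf.set("hadoop.security.authentication", "kerberos");
   *   UserGroupInformation.setConfiguration(conf);
   *   UserGroupInformation.loginUserFromKeytab(
   *       "hdfs/nn.example.com@EXAMPLE.COM",
   *       "/etc/security/keytabs/hdfs.keytab");
   *   UserGroupInformation ugi = UserGroupInformation.getLoginUser();
   */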
/**
* Re-login a user from keytab if TGT is expired or is close to expiry.
*
* @throws IOException
*/
public synchronized void checkTGTAndReloginFromKeytab() throws IOException {
    if (!isSecurityEnabled()
        || user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS
        || !isKeytab) {
      return;
    }
KerberosTicket tgt = getTGT();
if (tgt != null && !shouldRenewImmediatelyForTests &&
Time.now() < getRefreshTime(tgt)) {
return;
}
reloginFromKeytab();
}
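  /*
   * Illustrative sketch of the intended call pattern (the service loop and
   * doWork() are hypothetical): a keytab-based daemon calls this before each
   * batch of RPCs; it is a cheap no-op while the TGT is still fresh.
   *
   *   UserGroupInformation ugi = UserGroupInformation.getLoginUser();
   *   while (running) {
   *     ugi.checkTGTAndReloginFromKeytab();
   *     doWork();
   *   }
   */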
/**
   * Re-log in a user from a keytab file. Loads a user identity from a keytab
   * file and logs them in. They become the currently logged-in user. This
   * method assumes that {@link #loginUserFromKeytab(String, String)} has
   * already been called.
* The Subject field of this UserGroupInformation object is updated to have
* the new credentials.
* @throws IOException on a failure
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public synchronized void reloginFromKeytab()
throws IOException {
    if (!isSecurityEnabled() ||
        user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS ||
        !isKeytab) {
      return;
    }
long now = Time.now();
if (!shouldRenewImmediatelyForTests && !hasSufficientTimeElapsed(now)) {
return;
}
KerberosTicket tgt = getTGT();
//Return if TGT is valid and is not going to expire soon.
if (tgt != null && !shouldRenewImmediatelyForTests &&
now < getRefreshTime(tgt)) {
return;
}
LoginContext login = getLogin();
if (login == null || keytabFile == null) {
throw new IOException("loginUserFromKeyTab must be done first");
}
long start = 0;
// register most recent relogin attempt
user.setLastLogin(now);
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Initiating logout for " + getUserName());
}
synchronized (UserGroupInformation.class) {
// clear up the kerberos state. But the tokens are not cleared! As per
// the Java kerberos login module code, only the kerberos credentials
// are cleared
login.logout();
// login and also update the subject field of this instance to
// have the new credentials (pass it to the LoginContext constructor)
login = newLoginContext(
HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME, getSubject(),
new HadoopConfiguration());
if (LOG.isDebugEnabled()) {
LOG.debug("Initiating re-login for " + keytabPrincipal);
}
start = Time.now();
login.login();
metrics.loginSuccess.add(Time.now() - start);
setLogin(login);
}
} catch (LoginException le) {
if (start > 0) {
metrics.loginFailure.add(Time.now() - start);
}
throw new IOException("Login failure for " + keytabPrincipal +
" from keytab " + keytabFile, le);
}
}
/**
   * Re-log in a user from the Kerberos ticket cache. This
   * method assumes that a login has already happened.
* The Subject field of this UserGroupInformation object is updated to have
* the new credentials.
* @throws IOException on a failure
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public synchronized void reloginFromTicketCache()
throws IOException {
    if (!isSecurityEnabled() ||
        user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS ||
        !isKrbTkt) {
      return;
    }
LoginContext login = getLogin();
if (login == null) {
throw new IOException("login must be done first");
}
long now = Time.now();
if (!hasSufficientTimeElapsed(now)) {
return;
}
// register most recent relogin attempt
user.setLastLogin(now);
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Initiating logout for " + getUserName());
}
//clear up the kerberos state. But the tokens are not cleared! As per
//the Java kerberos login module code, only the kerberos credentials
//are cleared
login.logout();
//login and also update the subject field of this instance to
//have the new credentials (pass it to the LoginContext constructor)
login =
newLoginContext(HadoopConfiguration.USER_KERBEROS_CONFIG_NAME,
getSubject(), new HadoopConfiguration());
if (LOG.isDebugEnabled()) {
LOG.debug("Initiating re-login for " + getUserName());
}
login.login();
setLogin(login);
} catch (LoginException le) {
throw new IOException("Login failure for " + getUserName(), le);
}
}
/**
   * Log a user in from a keytab file. Loads a user identity from a keytab
   * file and logs them in. This new user does not affect the currently
   * logged-in user.
* @param user the principal name to load from the keytab
* @param path the path to the keytab file
* @throws IOException if the keytab file can't be read
*/
public synchronized
static UserGroupInformation loginUserFromKeytabAndReturnUGI(String user,
String path
) throws IOException {
    if (!isSecurityEnabled()) {
      return UserGroupInformation.getCurrentUser();
    }
String oldKeytabFile = null;
String oldKeytabPrincipal = null;
long start = 0;
try {
oldKeytabFile = keytabFile;
oldKeytabPrincipal = keytabPrincipal;
keytabFile = path;
keytabPrincipal = user;
Subject subject = new Subject();
LoginContext login = newLoginContext(
HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME, subject,
new HadoopConfiguration());
start = Time.now();
login.login();
metrics.loginSuccess.add(Time.now() - start);
UserGroupInformation newLoginUser = new UserGroupInformation(subject);
newLoginUser.setLogin(login);
newLoginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
return newLoginUser;
} catch (LoginException le) {
if (start > 0) {
metrics.loginFailure.add(Time.now() - start);
}
throw new IOException("Login failure for " + user + " from keytab " +
path, le);
} finally {
      if (oldKeytabFile != null) {
        keytabFile = oldKeytabFile;
      }
      if (oldKeytabPrincipal != null) {
        keytabPrincipal = oldKeytabPrincipal;
      }
}
}
private boolean hasSufficientTimeElapsed(long now) {
if (now - user.getLastLogin() < MIN_TIME_BEFORE_RELOGIN ) {
LOG.warn("Not attempting to re-login since the last re-login was " +
"attempted less than " + (MIN_TIME_BEFORE_RELOGIN/1000) + " seconds"+
" before.");
return false;
}
return true;
}
/**
   * Did the login happen via a keytab file?
   * @return true if the login used a keytab, false otherwise
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public synchronized static boolean isLoginKeytabBased() throws IOException {
return getLoginUser().isKeytab;
}
/**
   * Did the login happen via the Kerberos ticket cache?
   * @return true if the login used the ticket cache, false otherwise
*/
public static boolean isLoginTicketBased() throws IOException {
return getLoginUser().isKrbTkt;
}
/**
* Create a user from a login name. It is intended to be used for remote
* users in RPC, since it won't have any credentials.
* @param user the full user principal name, must not be empty or null
* @return the UserGroupInformation for the remote user.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createRemoteUser(String user) {
return createRemoteUser(user, AuthMethod.SIMPLE);
}
/**
* Create a user from a login name. It is intended to be used for remote
* users in RPC, since it won't have any credentials.
* @param user the full user principal name, must not be empty or null
* @return the UserGroupInformation for the remote user.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createRemoteUser(String user, AuthMethod authMethod) {
if (user == null || user.isEmpty()) {
      throw new IllegalArgumentException("Null or empty user");
}
Subject subject = new Subject();
subject.getPrincipals().add(new User(user));
UserGroupInformation result = new UserGroupInformation(subject);
result.setAuthenticationMethod(authMethod);
return result;
}
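  /*
   * Illustrative sketch (the principal name is hypothetical): a server that
   * has already authenticated a caller can build a credential-less UGI for
   * authorization and group checks.
   *
   *   UserGroupInformation remote =
   *       UserGroupInformation.createRemoteUser("bob", AuthMethod.KERBEROS);
   *   String[] groups = remote.getGroupNames(); // resolved via group mapping
   */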
/**
   * Existing types of authentication methods.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static enum AuthenticationMethod {
// currently we support only one auth per method, but eventually a
// subtype is needed to differentiate, ex. if digest is token or ldap
SIMPLE(AuthMethod.SIMPLE,
HadoopConfiguration.SIMPLE_CONFIG_NAME),
KERBEROS(AuthMethod.KERBEROS,
HadoopConfiguration.USER_KERBEROS_CONFIG_NAME),
TOKEN(AuthMethod.TOKEN),
CERTIFICATE(null),
KERBEROS_SSL(null),
PROXY(null);
private final AuthMethod authMethod;
private final String loginAppName;
private AuthenticationMethod(AuthMethod authMethod) {
this(authMethod, null);
}
private AuthenticationMethod(AuthMethod authMethod, String loginAppName) {
this.authMethod = authMethod;
this.loginAppName = loginAppName;
}
public AuthMethod getAuthMethod() {
return authMethod;
}
String getLoginAppName() {
if (loginAppName == null) {
throw new UnsupportedOperationException(
this + " login authentication is not supported");
}
return loginAppName;
}
public static AuthenticationMethod valueOf(AuthMethod authMethod) {
for (AuthenticationMethod value : values()) {
if (value.getAuthMethod() == authMethod) {
return value;
}
}
throw new IllegalArgumentException(
"no authentication method for " + authMethod);
}
};
/**
   * Create a proxy user using the username of the effective user and the ugi
   * of the real user.
   * @param user the username of the effective (proxied) user
   * @param realUser the ugi of the real (authenticated) user
   * @return the proxy user's ugi
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createProxyUser(String user,
UserGroupInformation realUser) {
if (user == null || user.isEmpty()) {
      throw new IllegalArgumentException("Null or empty user");
}
if (realUser == null) {
throw new IllegalArgumentException("Null real user");
}
Subject subject = new Subject();
Set<Principal> principals = subject.getPrincipals();
principals.add(new User(user));
principals.add(new RealUser(realUser));
    UserGroupInformation result = new UserGroupInformation(subject);
result.setAuthenticationMethod(AuthenticationMethod.PROXY);
return result;
}
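  /*
   * Illustrative proxy-user sketch (the effective user "alice" is
   * hypothetical, and the real user must be authorized to impersonate via
   * the hadoop.proxyuser.* settings):
   *
   *   UserGroupInformation real = UserGroupInformation.getLoginUser();
   *   UserGroupInformation proxy =
   *       UserGroupInformation.createProxyUser("alice", real);
   *   proxy.doAs(new PrivilegedExceptionAction<Void>() {
   *     @Override
   *     public Void run() throws Exception {
   *       // hypothetical work performed with alice as the effective user
   *       return null;
   *     }
   *   });
   */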
/**
   * Get the real user (as opposed to the effective user).
   * @return the real user behind this proxy user, or null if there is none
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public UserGroupInformation getRealUser() {
for (RealUser p: subject.getPrincipals(RealUser.class)) {
return p.getRealUser();
}
return null;
}
/**
* This class is used for storing the groups for testing. It stores a local
* map that has the translation of usernames to groups.
*/
private static class TestingGroups extends Groups {
private final Map<String, List<String>> userToGroupsMapping =
new HashMap<String,List<String>>();
private Groups underlyingImplementation;
private TestingGroups(Groups underlyingImplementation) {
super(new org.apache.hadoop.conf.Configuration());
this.underlyingImplementation = underlyingImplementation;
}
@Override
public List<String> getGroups(String user) throws IOException {
List<String> result = userToGroupsMapping.get(user);
if (result == null) {
result = underlyingImplementation.getGroups(user);
}
return result;
}
private void setUserGroups(String user, String[] groups) {
userToGroupsMapping.put(user, Arrays.asList(groups));
}
}
/**
* Create a UGI for testing HDFS and MapReduce
* @param user the full user principal name
* @param userGroups the names of the groups that the user belongs to
* @return a fake user for running unit tests
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createUserForTesting(String user,
String[] userGroups) {
ensureInitialized();
UserGroupInformation ugi = createRemoteUser(user);
// make sure that the testing object is setup
if (!(groups instanceof TestingGroups)) {
groups = new TestingGroups(groups);
}
// add the user groups
((TestingGroups) groups).setUserGroups(ugi.getShortUserName(), userGroups);
return ugi;
}
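  /*
   * Illustrative test sketch (user and group names are hypothetical;
   * assertEquals is JUnit's):
   *
   *   UserGroupInformation testUgi = UserGroupInformation
   *       .createUserForTesting("tester", new String[] {"devs", "admins"});
   *   assertEquals("devs", testUgi.getPrimaryGroupName());
   */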
/**
* Create a proxy user UGI for testing HDFS and MapReduce
*
* @param user
* the full user principal name for effective user
* @param realUser
* UGI of the real user
* @param userGroups
* the names of the groups that the user belongs to
* @return a fake user for running unit tests
*/
public static UserGroupInformation createProxyUserForTesting(String user,
UserGroupInformation realUser, String[] userGroups) {
ensureInitialized();
UserGroupInformation ugi = createProxyUser(user, realUser);
// make sure that the testing object is setup
if (!(groups instanceof TestingGroups)) {
groups = new TestingGroups(groups);
}
// add the user groups
((TestingGroups) groups).setUserGroups(ugi.getShortUserName(), userGroups);
return ugi;
}
/**
* Get the user's login name.
* @return the user's name up to the first '/' or '@'.
*/
public String getShortUserName() {
for (User p: subject.getPrincipals(User.class)) {
return p.getShortName();
}
return null;
}
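  // For example (illustrative): the Kerberos principal
  // "nn/host1.example.com@EXAMPLE.COM" has the short name "nn".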
public String getPrimaryGroupName() throws IOException {
String[] groups = getGroupNames();
if (groups.length == 0) {
throw new IOException("There is no primary group for UGI " + this);
}
return groups[0];
}
/**
* Get the user's full principal name.
* @return the user's full principal name.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public String getUserName() {
return user.getName();
}
/**
* Add a TokenIdentifier to this UGI. The TokenIdentifier has typically been
* authenticated by the RPC layer as belonging to the user represented by this
* UGI.
*
* @param tokenId
* tokenIdentifier to be added
* @return true on successful add of new tokenIdentifier
*/
public synchronized boolean addTokenIdentifier(TokenIdentifier tokenId) {
return subject.getPublicCredentials().add(tokenId);
}
/**
* Get the set of TokenIdentifiers belonging to this UGI
*
* @return the set of TokenIdentifiers belonging to this UGI
*/
public synchronized Set<TokenIdentifier> getTokenIdentifiers() {
return subject.getPublicCredentials(TokenIdentifier.class);
}
/**
* Add a token to this UGI
*
* @param token Token to be added
* @return true on successful add of new token
*/
public boolean addToken(Token<? extends TokenIdentifier> token) {
return (token != null) ? addToken(token.getService(), token) : false;
}
/**
* Add a named token to this UGI
*
* @param alias Name of the token
* @param token Token to be added
* @return true on successful add of new token
*/
public boolean addToken(Text alias, Token<? extends TokenIdentifier> token) {
synchronized (subject) {
getCredentialsInternal().addToken(alias, token);
return true;
}
}
/**
* Obtain the collection of tokens associated with this user.
*
* @return an unmodifiable collection of tokens associated with user
*/
public Collection<Token<? extends TokenIdentifier>> getTokens() {
synchronized (subject) {
return Collections.unmodifiableCollection(
new ArrayList<Token<?>>(getCredentialsInternal().getAllTokens()));
}
}
/**
* Obtain the tokens in credentials form associated with this user.
*
* @return Credentials of tokens associated with this user
*/
public Credentials getCredentials() {
synchronized (subject) {
Credentials creds = new Credentials(getCredentialsInternal());
Iterator<Token<?>> iter = creds.getAllTokens().iterator();
while (iter.hasNext()) {
if (iter.next() instanceof Token.PrivateToken) {
iter.remove();
}
}
return creds;
}
}
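  /*
   * Illustrative token-handling sketch (fetchDelegationToken() and the
   * service alias are hypothetical):
   *
   *   Token<? extends TokenIdentifier> token = fetchDelegationToken();
   *   ugi.addToken(new Text("ha-hdfs:mycluster"), token);
   *   for (Token<?> t : ugi.getTokens()) {
   *     System.out.println("token for service " + t.getService());
   *   }
   */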
/**
* Add the given Credentials to this user.
* @param credentials of tokens and secrets
*/
public void addCredentials(Credentials credentials) {
synchronized (subject) {
getCredentialsInternal().addAll(credentials);
}
}
private synchronized Credentials getCredentialsInternal() {
final Credentials credentials;
final Set<Credentials> credentialsSet =
subject.getPrivateCredentials(Credentials.class);
if (!credentialsSet.isEmpty()){
credentials = credentialsSet.iterator().next();
} else {
credentials = new Credentials();
subject.getPrivateCredentials().add(credentials);
}
return credentials;
}
/**
* Get the group names for this user.
   * @return the list of groups with the primary group first. If the group
   * lookup fails, it returns an empty list.
*/
public synchronized String[] getGroupNames() {
ensureInitialized();
try {
Set<String> result = new LinkedHashSet<String>
(groups.getGroups(getShortUserName()));
return result.toArray(new String[result.size()]);
} catch (IOException ie) {
LOG.warn("No groups available for user " + getShortUserName());
return new String[0];
}
}
/**
   * Return the user name, the authentication method, and any real-user chain.
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder(getUserName());
sb.append(" (auth:"+getAuthenticationMethod()+")");
if (getRealUser() != null) {
sb.append(" via ").append(getRealUser().toString());
}
return sb.toString();
}
/**
   * Sets the authentication method in the subject.
   *
   * @param authMethod the authentication method to record
*/
public synchronized
void setAuthenticationMethod(AuthenticationMethod authMethod) {
user.setAuthenticationMethod(authMethod);
}
/**
   * Sets the authentication method in the subject.
   *
   * @param authMethod the RPC-layer authentication method to record
*/
public void setAuthenticationMethod(AuthMethod authMethod) {
user.setAuthenticationMethod(AuthenticationMethod.valueOf(authMethod));
}
/**
* Get the authentication method from the subject
*
* @return AuthenticationMethod in the subject, null if not present.
*/
public synchronized AuthenticationMethod getAuthenticationMethod() {
return user.getAuthenticationMethod();
}
/**
* Get the authentication method from the real user's subject. If there
* is no real user, return the given user's authentication method.
*
* @return AuthenticationMethod in the subject, null if not present.
*/
public synchronized AuthenticationMethod getRealAuthenticationMethod() {
UserGroupInformation ugi = getRealUser();
if (ugi == null) {
ugi = this;
}
return ugi.getAuthenticationMethod();
}
/**
* Returns the authentication method of a ugi. If the authentication method is
* PROXY, returns the authentication method of the real user.
*
* @param ugi
* @return AuthenticationMethod
*/
public static AuthenticationMethod getRealAuthenticationMethod(
UserGroupInformation ugi) {
AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
if (authMethod == AuthenticationMethod.PROXY) {
authMethod = ugi.getRealUser().getAuthenticationMethod();
}
return authMethod;
}
/**
   * Compare the subjects for identity; two UGIs are equal only if they wrap
   * the same Subject instance.
*/
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o == null || getClass() != o.getClass()) {
return false;
} else {
return subject == ((UserGroupInformation) o).subject;
}
}
/**
* Return the hash of the subject.
*/
@Override
public int hashCode() {
return System.identityHashCode(subject);
}
/**
* Get the underlying subject from this ugi.
* @return the subject that represents this user.
*/
protected Subject getSubject() {
return subject;
}
/**
* Run the given action as the user.
* @param <T> the return type of the run method
* @param action the method to execute
* @return the value from the run method
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public <T> T doAs(PrivilegedAction<T> action) {
logPrivilegedAction(subject, action);
return Subject.doAs(subject, action);
}
/**
* Run the given action as the user, potentially throwing an exception.
* @param <T> the return type of the run method
* @param action the method to execute
* @return the value from the run method
* @throws IOException if the action throws an IOException
* @throws Error if the action throws an Error
* @throws RuntimeException if the action throws a RuntimeException
* @throws InterruptedException if the action throws an InterruptedException
* @throws UndeclaredThrowableException if the action throws something else
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public <T> T doAs(PrivilegedExceptionAction<T> action
) throws IOException, InterruptedException {
try {
logPrivilegedAction(subject, action);
return Subject.doAs(subject, action);
} catch (PrivilegedActionException pae) {
Throwable cause = pae.getCause();
if (LOG.isDebugEnabled()) {
LOG.debug("PrivilegedActionException as:" + this + " cause:" + cause);
}
if (cause instanceof IOException) {
throw (IOException) cause;
} else if (cause instanceof Error) {
throw (Error) cause;
} else if (cause instanceof RuntimeException) {
throw (RuntimeException) cause;
} else if (cause instanceof InterruptedException) {
throw (InterruptedException) cause;
} else {
throw new UndeclaredThrowableException(cause);
}
}
}
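  /*
   * Illustrative sketch (conf is a hypothetical Configuration): an
   * IOException thrown inside run() propagates directly to the caller,
   * per the unwrapping above.
   *
   *   FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
   *     @Override
   *     public FileSystem run() throws IOException {
   *       return FileSystem.get(conf);
   *     }
   *   });
   */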
private void logPrivilegedAction(Subject subject, Object action) {
if (LOG.isDebugEnabled()) {
// would be nice if action included a descriptive toString()
String where = new Throwable().getStackTrace()[2].toString();
LOG.debug("PrivilegedAction as:"+this+" from:"+where);
}
}
private void print() throws IOException {
System.out.println("User: " + getUserName());
System.out.print("Group Ids: ");
System.out.println();
String[] groups = getGroupNames();
System.out.print("Groups: ");
for(int i=0; i < groups.length; i++) {
System.out.print(groups[i] + " ");
}
System.out.println();
}
/**
* A test method to print out the current user's UGI.
* @param args if there are two arguments, read the user from the keytab
* and print it out.
* @throws Exception
*/
public static void main(String [] args) throws Exception {
System.out.println("Getting UGI for current user");
UserGroupInformation ugi = getCurrentUser();
ugi.print();
System.out.println("UGI: " + ugi);
System.out.println("Auth method " + ugi.user.getAuthenticationMethod());
System.out.println("Keytab " + ugi.isKeytab);
System.out.println("============================================================");
if (args.length == 2) {
System.out.println("Getting UGI from keytab....");
loginUserFromKeytab(args[0], args[1]);
getCurrentUser().print();
System.out.println("Keytab: " + ugi);
System.out.println("Auth method " + loginUser.user.getAuthenticationMethod());
System.out.println("Keytab " + loginUser.isKeytab);
}
}
}
| 58,217 | 32.828007 | 107 | java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMappingWithFallback.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
public class JniBasedUnixGroupsNetgroupMappingWithFallback implements
GroupMappingServiceProvider {
private static final Log LOG = LogFactory
.getLog(JniBasedUnixGroupsNetgroupMappingWithFallback.class);
private GroupMappingServiceProvider impl;
public JniBasedUnixGroupsNetgroupMappingWithFallback() {
if (NativeCodeLoader.isNativeCodeLoaded()) {
this.impl = new JniBasedUnixGroupsNetgroupMapping();
} else {
LOG.info("Falling back to shell based");
this.impl = new ShellBasedUnixGroupsNetgroupMapping();
}
if (LOG.isDebugEnabled()) {
LOG.debug("Group mapping impl=" + impl.getClass().getName());
}
}
@Override
public List<String> getGroups(String user) throws IOException {
return impl.getGroups(user);
}
@Override
public void cacheGroupsRefresh() throws IOException {
impl.cacheGroupsRefresh();
}
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
impl.cacheGroupsAdd(groups);
}
}
| 2,042 | 30.921875 | 75 | java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationRequestProto;
import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class RefreshUserMappingsProtocolClientSideTranslatorPB implements
ProtocolMetaInterface, RefreshUserMappingsProtocol, Closeable {
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final RefreshUserMappingsProtocolPB rpcProxy;
private final static RefreshUserToGroupsMappingsRequestProto
VOID_REFRESH_USER_TO_GROUPS_MAPPING_REQUEST =
RefreshUserToGroupsMappingsRequestProto.newBuilder().build();
private final static RefreshSuperUserGroupsConfigurationRequestProto
VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_REQUEST =
RefreshSuperUserGroupsConfigurationRequestProto.newBuilder().build();
public RefreshUserMappingsProtocolClientSideTranslatorPB(
RefreshUserMappingsProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);
}
@Override
public void refreshUserToGroupsMappings() throws IOException {
try {
rpcProxy.refreshUserToGroupsMappings(NULL_CONTROLLER,
VOID_REFRESH_USER_TO_GROUPS_MAPPING_REQUEST);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
}
@Override
public void refreshSuperUserGroupsConfiguration() throws IOException {
try {
rpcProxy.refreshSuperUserGroupsConfiguration(NULL_CONTROLLER,
VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_REQUEST);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
}
@Override
public boolean isMethodSupported(String methodName) throws IOException {
return RpcClientUtil
.isMethodSupported(rpcProxy, RefreshUserMappingsProtocolPB.class,
RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(RefreshUserMappingsProtocolPB.class),
methodName);
}
}
| 3,379 | 36.977528 | 122 | java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.security.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto;
import org.apache.hadoop.security.protocolPB.RefreshAuthorizationPolicyProtocolPB;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class RefreshAuthorizationPolicyProtocolClientSideTranslatorPB implements
ProtocolMetaInterface, RefreshAuthorizationPolicyProtocol, Closeable {
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final RefreshAuthorizationPolicyProtocolPB rpcProxy;
private final static RefreshServiceAclRequestProto
VOID_REFRESH_SERVICE_ACL_REQUEST =
RefreshServiceAclRequestProto.newBuilder().build();
public RefreshAuthorizationPolicyProtocolClientSideTranslatorPB(
RefreshAuthorizationPolicyProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);
}
@Override
public void refreshServiceAcl() throws IOException {
try {
rpcProxy.refreshServiceAcl(NULL_CONTROLLER,
VOID_REFRESH_SERVICE_ACL_REQUEST);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
}
@Override
public boolean isMethodSupported(String methodName) throws IOException {
return RpcClientUtil.isMethodSupported(rpcProxy,
RefreshAuthorizationPolicyProtocolPB.class,
RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(RefreshAuthorizationPolicyProtocolPB.class),
methodName);
}
}
| 2,795 | 36.28 | 111 | java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@ProtocolInfo(
protocolName = "org.apache.hadoop.security.RefreshUserMappingsProtocol",
protocolVersion = 1)
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface RefreshUserMappingsProtocolPB extends
RefreshUserMappingsProtocolService.BlockingInterface {
}
| 1,648 | 42.394737 | 109 | java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.protocolPB;
import java.io.IOException;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.security.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto;
import org.apache.hadoop.security.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclResponseProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class RefreshAuthorizationPolicyProtocolServerSideTranslatorPB implements
RefreshAuthorizationPolicyProtocolPB {
private final RefreshAuthorizationPolicyProtocol impl;
private final static RefreshServiceAclResponseProto
VOID_REFRESH_SERVICE_ACL_RESPONSE = RefreshServiceAclResponseProto
.newBuilder().build();
public RefreshAuthorizationPolicyProtocolServerSideTranslatorPB(
RefreshAuthorizationPolicyProtocol impl) {
this.impl = impl;
}
@Override
public RefreshServiceAclResponseProto refreshServiceAcl(
RpcController controller, RefreshServiceAclRequestProto request)
throws ServiceException {
try {
impl.refreshServiceAcl();
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REFRESH_SERVICE_ACL_RESPONSE;
}
}
| 2,099 | 36.5 | 112 | java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.protocolPB;
import java.io.IOException;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationRequestProto;
import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationResponseProto;
import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto;
import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsResponseProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class RefreshUserMappingsProtocolServerSideTranslatorPB implements RefreshUserMappingsProtocolPB {
private final RefreshUserMappingsProtocol impl;
private final static RefreshUserToGroupsMappingsResponseProto
VOID_REFRESH_USER_GROUPS_MAPPING_RESPONSE =
RefreshUserToGroupsMappingsResponseProto.newBuilder().build();
private final static RefreshSuperUserGroupsConfigurationResponseProto
VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_RESPONSE =
RefreshSuperUserGroupsConfigurationResponseProto.newBuilder()
.build();
public RefreshUserMappingsProtocolServerSideTranslatorPB(RefreshUserMappingsProtocol impl) {
this.impl = impl;
}
@Override
public RefreshUserToGroupsMappingsResponseProto
refreshUserToGroupsMappings(RpcController controller,
RefreshUserToGroupsMappingsRequestProto request)
throws ServiceException {
try {
impl.refreshUserToGroupsMappings();
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REFRESH_USER_GROUPS_MAPPING_RESPONSE;
}
@Override
public RefreshSuperUserGroupsConfigurationResponseProto
refreshSuperUserGroupsConfiguration(RpcController controller,
RefreshSuperUserGroupsConfigurationRequestProto request)
throws ServiceException {
try {
impl.refreshSuperUserGroupsConfiguration();
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_RESPONSE;
}
}
| 3,028 | 39.386667 | 123 | java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshAuthorizationPolicyProtocolService;
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@ProtocolInfo(
protocolName = "org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol",
protocolVersion = 1)
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface RefreshAuthorizationPolicyProtocolPB extends
RefreshAuthorizationPolicyProtocolService.BlockingInterface {
}
| 1,691 | 43.526316 | 123 | java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/package-info.java |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
@InterfaceStability.Evolving
package org.apache.hadoop.security.authorize;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,066 | 45.391304 | 75 | java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/Service.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import java.security.Permission;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* An abstract definition of <em>service</em> as related to
* Service Level Authorization for Hadoop.
*
 * Each service defines its configuration key and also the necessary
* {@link Permission} required to access the service.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class Service {
private String key;
private Class<?> protocol;
public Service(String key, Class<?> protocol) {
this.key = key;
this.protocol = protocol;
}
/**
* Get the configuration key for the service.
* @return the configuration key for the service
*/
public String getServiceKey() {
return key;
}
/**
* Get the protocol for the service
* @return the {@link Class} for the protocol
*/
public Class<?> getProtocol() {
return protocol;
}
}
| 1,845 | 30.288136 | 75 | java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AuthorizationException.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import java.io.PrintStream;
import java.io.PrintWriter;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.AccessControlException;
/**
* An exception class for authorization-related issues.
*
* This class <em>does not</em> provide the stack trace for security purposes.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "YARN"})
@InterfaceStability.Evolving
public class AuthorizationException extends AccessControlException {
private static final long serialVersionUID = 1L;
public AuthorizationException() {
super();
}
public AuthorizationException(String message) {
super(message);
}
/**
* Constructs a new exception with the specified cause and a detail
* message of <tt>(cause==null ? null : cause.toString())</tt> (which
* typically contains the class and detail message of <tt>cause</tt>).
* @param cause the cause (which is saved for later retrieval by the
* {@link #getCause()} method). (A <tt>null</tt> value is
* permitted, and indicates that the cause is nonexistent or
* unknown.)
*/
public AuthorizationException(Throwable cause) {
super(cause);
}
  private static final StackTraceElement[] stackTrace = new StackTraceElement[0];
@Override
public StackTraceElement[] getStackTrace() {
// Do not provide the stack-trace
return stackTrace;
}
@Override
public void printStackTrace() {
// Do not provide the stack-trace
}
@Override
public void printStackTrace(PrintStream s) {
// Do not provide the stack-trace
}
@Override
public void printStackTrace(PrintWriter s) {
// Do not provide the stack-trace
}
}
| 2,618 | 31.333333 | 78 | java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/DefaultImpersonationProvider.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.MachineList;
import com.google.common.annotations.VisibleForTesting;
@InterfaceStability.Unstable
@InterfaceAudience.Public
public class DefaultImpersonationProvider implements ImpersonationProvider {
private static final String CONF_HOSTS = ".hosts";
private static final String CONF_USERS = ".users";
private static final String CONF_GROUPS = ".groups";
// acl and list of hosts per proxyuser
private Map<String, AccessControlList> proxyUserAcl =
new HashMap<String, AccessControlList>();
private Map<String, MachineList> proxyHosts =
new HashMap<String, MachineList>();
private Configuration conf;
private static DefaultImpersonationProvider testProvider;
public static synchronized DefaultImpersonationProvider getTestProvider() {
if (testProvider == null) {
testProvider = new DefaultImpersonationProvider();
testProvider.setConf(new Configuration());
testProvider.init(ProxyUsers.CONF_HADOOP_PROXYUSER);
}
return testProvider;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
private String configPrefix;
@Override
public void init(String configurationPrefix) {
configPrefix = configurationPrefix +
(configurationPrefix.endsWith(".") ? "" : ".");
// constructing regex to match the following patterns:
// $configPrefix.[ANY].users
// $configPrefix.[ANY].groups
// $configPrefix.[ANY].hosts
//
String prefixRegEx = configPrefix.replace(".", "\\.");
String usersGroupsRegEx = prefixRegEx + "[^.]*(" +
Pattern.quote(CONF_USERS) + "|" + Pattern.quote(CONF_GROUPS) + ")";
String hostsRegEx = prefixRegEx + "[^.]*" + Pattern.quote(CONF_HOSTS);
// get list of users and groups per proxyuser
Map<String,String> allMatchKeys =
conf.getValByRegex(usersGroupsRegEx);
for(Entry<String, String> entry : allMatchKeys.entrySet()) {
String aclKey = getAclKey(entry.getKey());
if (!proxyUserAcl.containsKey(aclKey)) {
proxyUserAcl.put(aclKey, new AccessControlList(
allMatchKeys.get(aclKey + CONF_USERS) ,
allMatchKeys.get(aclKey + CONF_GROUPS)));
}
}
// get hosts per proxyuser
allMatchKeys = conf.getValByRegex(hostsRegEx);
for(Entry<String, String> entry : allMatchKeys.entrySet()) {
proxyHosts.put(entry.getKey(),
new MachineList(entry.getValue()));
}
}
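  /*
   * Illustrative configuration matched by the patterns above (the superuser
   * "oozie" and the host values are hypothetical):
   *
   *   hadoop.proxyuser.oozie.users  = alice,bob
   *   hadoop.proxyuser.oozie.groups = etl
   *   hadoop.proxyuser.oozie.hosts  = gw1.example.com,10.1.0.0/16
   */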
@Override
public Configuration getConf() {
return conf;
}
@Override
public void authorize(UserGroupInformation user,
String remoteAddress) throws AuthorizationException {
UserGroupInformation realUser = user.getRealUser();
if (realUser == null) {
return;
}
AccessControlList acl = proxyUserAcl.get(configPrefix +
realUser.getShortUserName());
if (acl == null || !acl.isUserAllowed(user)) {
throw new AuthorizationException("User: " + realUser.getUserName()
+ " is not allowed to impersonate " + user.getUserName());
}
    MachineList machineList = proxyHosts.get(
        getProxySuperuserIpConfKey(realUser.getShortUserName()));
    if (machineList == null || !machineList.includes(remoteAddress)) {
throw new AuthorizationException("Unauthorized connection for super-user: "
+ realUser.getUserName() + " from IP " + remoteAddress);
}
}
private String getAclKey(String key) {
int endIndex = key.lastIndexOf(".");
if (endIndex != -1) {
return key.substring(0, endIndex);
}
return key;
}
/**
   * Returns the configuration key for the effective users allowed for a
   * superuser
   *
   * @param userName name of the superuser
   * @return configuration key for superuser users
*/
public String getProxySuperuserUserConfKey(String userName) {
return configPrefix + userName + CONF_USERS;
}
/**
* Returns configuration key for effective groups allowed for a superuser
*
* @param userName name of the superuser
* @return configuration key for superuser groups
*/
public String getProxySuperuserGroupConfKey(String userName) {
return configPrefix + userName + CONF_GROUPS;
}
/**
* Return configuration key for superuser ip addresses
*
* @param userName name of the superuser
* @return configuration key for superuser ip-addresses
*/
public String getProxySuperuserIpConfKey(String userName) {
return configPrefix + userName + CONF_HOSTS;
}
@VisibleForTesting
public Map<String, Collection<String>> getProxyGroups() {
Map<String,Collection<String>> proxyGroups = new HashMap<String,Collection<String>>();
for(Entry<String, AccessControlList> entry : proxyUserAcl.entrySet()) {
proxyGroups.put(entry.getKey() + CONF_GROUPS, entry.getValue().getGroups());
}
return proxyGroups;
}
@VisibleForTesting
public Map<String, Collection<String>> getProxyHosts() {
Map<String, Collection<String>> tmpProxyHosts =
new HashMap<String, Collection<String>>();
for (Map.Entry<String, MachineList> proxyHostEntry :proxyHosts.entrySet()) {
tmpProxyHosts.put(proxyHostEntry.getKey(),
proxyHostEntry.getValue().getCollection());
}
return tmpProxyHosts;
}
}
| 6,496 | 33.194737 | 91 | java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.annotations.VisibleForTesting;
@InterfaceStability.Unstable
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "HBase", "Hive"})
public class ProxyUsers {
public static final String CONF_HADOOP_PROXYUSER = "hadoop.proxyuser";
private static volatile ImpersonationProvider sip ;
/**
* Returns an instance of ImpersonationProvider.
   * Looks up the configuration to see if a custom class is specified.
   * @param conf configuration to read the provider class from
* @return ImpersonationProvider
*/
private static ImpersonationProvider getInstance(Configuration conf) {
Class<? extends ImpersonationProvider> clazz =
conf.getClass(
CommonConfigurationKeysPublic.HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS,
DefaultImpersonationProvider.class, ImpersonationProvider.class);
return ReflectionUtils.newInstance(clazz, conf);
}
/**
* refresh Impersonation rules
*/
public static void refreshSuperUserGroupsConfiguration() {
//load server side configuration;
refreshSuperUserGroupsConfiguration(new Configuration());
}
/**
* Refreshes configuration using the specified Proxy user prefix for
* properties.
*
* @param conf configuration
* @param proxyUserPrefix proxy user configuration prefix
*/
public static void refreshSuperUserGroupsConfiguration(Configuration conf,
String proxyUserPrefix) {
Preconditions.checkArgument(proxyUserPrefix != null &&
!proxyUserPrefix.isEmpty(), "prefix cannot be NULL or empty");
// sip is volatile. Any assignment to it as well as the object's state
// will be visible to all the other threads.
ImpersonationProvider ip = getInstance(conf);
ip.init(proxyUserPrefix);
sip = ip;
ProxyServers.refresh(conf);
}
/**
* Refreshes configuration using the default Proxy user prefix for properties.
* @param conf configuration
*/
public static void refreshSuperUserGroupsConfiguration(Configuration conf) {
refreshSuperUserGroupsConfiguration(conf, CONF_HADOOP_PROXYUSER);
}
/**
* Authorize the superuser which is doing doAs
*
* @param user ugi of the effective or proxy user which contains a real user
* @param remoteAddress the ip address of client
* @throws AuthorizationException
*/
public static void authorize(UserGroupInformation user,
String remoteAddress) throws AuthorizationException {
    if (sip == null) {
// In a race situation, It is possible for multiple threads to satisfy this condition.
// The last assignment will prevail.
refreshSuperUserGroupsConfiguration();
}
sip.authorize(user, remoteAddress);
}
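  /*
   * Illustrative server-side check (getCallerUgi() and clientIp are
   * hypothetical): before honoring a doAs request, authorize the proxy
   * chain; a plain (non-proxy) caller passes trivially.
   *
   *   UserGroupInformation callerUgi = getCallerUgi();
   *   ProxyUsers.authorize(callerUgi, clientIp); // throws if not allowed
   */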
/**
* This function is kept to provide backward compatibility.
   * @param user the effective or proxy user
   * @param remoteAddress the ip address of the client
   * @param conf configuration (no longer used)
   * @throws AuthorizationException
   * @deprecated use {@link #authorize(UserGroupInformation, String)} instead.
*/
@Deprecated
public static void authorize(UserGroupInformation user,
String remoteAddress, Configuration conf) throws AuthorizationException {
    authorize(user, remoteAddress);
}
@VisibleForTesting
public static DefaultImpersonationProvider getDefaultImpersonationProvider() {
return ((DefaultImpersonationProvider)sip);
}
}
| 4,503 | 35.032 | 92 | java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
/**
* Class representing a configured access control list.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class AccessControlList implements Writable {
static { // register a ctor
WritableFactories.setFactory
(AccessControlList.class,
new WritableFactory() {
@Override
public Writable newInstance() { return new AccessControlList(); }
});
}
// Indicates an ACL string that represents access to all users
public static final String WILDCARD_ACL_VALUE = "*";
private static final int INITIAL_CAPACITY = 256;
// Set of users who are granted access.
private Collection<String> users;
// Set of groups which are granted access
private Collection<String> groups;
// Whether all users are granted access.
private boolean allAllowed;
private Groups groupsMapping = Groups.getUserToGroupsMappingService(new Configuration());
/**
* This constructor exists primarily for AccessControlList to be Writable.
*/
public AccessControlList() {
}
/**
* Construct a new ACL from a String representation of the same.
*
* The String is a comma-separated list of users and groups.
* The user list comes first and is separated by a space from
* the group list, e.g. "user1,user2 group1,group2".
*
* @param aclString String representation of the ACL
*/
public AccessControlList(String aclString) {
buildACL(aclString.split(" ", 2));
}
/**
* Construct a new ACL from String representation of users and groups
*
* The arguments are comma separated lists
*
* @param users comma separated list of users
* @param groups comma separated list of groups
*/
public AccessControlList(String users, String groups) {
buildACL(new String[] {users, groups});
}
/**
* Build ACL from the given two Strings.
* The Strings contain comma separated values.
*
* @param userGroupStrings array of Strings to build the ACL from
*/
private void buildACL(String[] userGroupStrings) {
users = new HashSet<String>();
groups = new HashSet<String>();
for (String aclPart : userGroupStrings) {
if (aclPart != null && isWildCardACLValue(aclPart)) {
allAllowed = true;
break;
}
}
if (!allAllowed) {
if (userGroupStrings.length >= 1 && userGroupStrings[0] != null) {
users = StringUtils.getTrimmedStringCollection(userGroupStrings[0]);
}
if (userGroupStrings.length == 2 && userGroupStrings[1] != null) {
groups = StringUtils.getTrimmedStringCollection(userGroupStrings[1]);
groupsMapping.cacheGroupsAdd(new LinkedList<String>(groups));
}
}
}
/**
* Checks whether ACL string contains wildcard
*
* @param aclString check this ACL string for wildcard
* @return true if the ACL string contains a wildcard, false otherwise
*/
private boolean isWildCardACLValue(String aclString) {
if (aclString.contains(WILDCARD_ACL_VALUE) &&
aclString.trim().equals(WILDCARD_ACL_VALUE)) {
return true;
}
return false;
}
public boolean isAllAllowed() {
return allAllowed;
}
/**
* Add user to the names of users allowed for this service.
*
* @param user
* The user name
*/
public void addUser(String user) {
if (isWildCardACLValue(user)) {
throw new IllegalArgumentException("User " + user + " can not be added");
}
if (!isAllAllowed()) {
users.add(user);
}
}
/**
* Add group to the names of groups allowed for this service.
*
* @param group
* The group name
*/
public void addGroup(String group) {
if (isWildCardACLValue(group)) {
throw new IllegalArgumentException("Group " + group + " can not be added");
}
if (!isAllAllowed()) {
List<String> groupsList = new LinkedList<String>();
groupsList.add(group);
groupsMapping.cacheGroupsAdd(groupsList);
groups.add(group);
}
}
/**
* Remove user from the names of users allowed for this service.
*
* @param user
* The user name
*/
public void removeUser(String user) {
if (isWildCardACLValue(user)) {
throw new IllegalArgumentException("User " + user + " can not be removed");
}
if (!isAllAllowed()) {
users.remove(user);
}
}
/**
* Remove group from the names of groups allowed for this service.
*
* @param group
* The group name
*/
public void removeGroup(String group) {
if (isWildCardACLValue(group)) {
throw new IllegalArgumentException("Group " + group
+ " can not be removed");
}
if (!isAllAllowed()) {
groups.remove(group);
}
}
/**
* Get the names of users allowed for this service.
* @return the set of user names. The set must not be modified.
*/
public Collection<String> getUsers() {
return users;
}
/**
* Get the names of user groups allowed for this service.
* @return the set of group names. The set must not be modified.
*/
public Collection<String> getGroups() {
return groups;
}
/**
* Checks if a user represented by the provided {@link UserGroupInformation}
* is a member of the Access Control List
* @param ugi UserGroupInformation to check if contained in the ACL
* @return true if ugi is member of the list
*/
public final boolean isUserInList(UserGroupInformation ugi) {
if (allAllowed || users.contains(ugi.getShortUserName())) {
return true;
} else {
for(String group: ugi.getGroupNames()) {
if (groups.contains(group)) {
return true;
}
}
}
return false;
}
public boolean isUserAllowed(UserGroupInformation ugi) {
return isUserInList(ugi);
}
/**
* Returns descriptive way of users and groups that are part of this ACL.
* Use {@link #getAclString()} to get the exact String that can be given to
* the constructor of AccessControlList to create a new instance.
*/
@Override
public String toString() {
String str = null;
if (allAllowed) {
str = "All users are allowed";
}
else if (users.isEmpty() && groups.isEmpty()) {
str = "No users are allowed";
}
else {
String usersStr = null;
String groupsStr = null;
if (!users.isEmpty()) {
usersStr = users.toString();
}
if (!groups.isEmpty()) {
groupsStr = groups.toString();
}
if (!users.isEmpty() && !groups.isEmpty()) {
str = "Users " + usersStr + " and members of the groups "
+ groupsStr + " are allowed";
}
else if (!users.isEmpty()) {
str = "Users " + usersStr + " are allowed";
}
else { // users is empty and groups is non-empty
str = "Members of the groups "
+ groupsStr + " are allowed";
}
}
return str;
}
/**
* Returns the access control list as a String that can be used for building a
* new instance by sending it to the constructor of {@link AccessControlList}.
*/
public String getAclString() {
StringBuilder sb = new StringBuilder(INITIAL_CAPACITY);
if (allAllowed) {
sb.append('*');
}
else {
sb.append(getUsersString());
sb.append(" ");
sb.append(getGroupsString());
}
return sb.toString();
}
/**
* Serializes the AccessControlList object
*/
@Override
public void write(DataOutput out) throws IOException {
String aclString = getAclString();
Text.writeString(out, aclString);
}
/**
* Deserializes the AccessControlList object
*/
@Override
public void readFields(DataInput in) throws IOException {
String aclString = Text.readString(in);
buildACL(aclString.split(" ", 2));
}
/**
* Returns comma-separated concatenated single String of the set 'users'
*
* @return comma separated list of users
*/
private String getUsersString() {
return getString(users);
}
/**
* Returns comma-separated concatenated single String of the set 'groups'
*
* @return comma separated list of groups
*/
private String getGroupsString() {
return getString(groups);
}
/**
* Returns comma-separated concatenated single String of all strings of
* the given set
*
* @param strings set of strings to concatenate
*/
private String getString(Collection<String> strings) {
StringBuilder sb = new StringBuilder(INITIAL_CAPACITY);
boolean first = true;
for(String str: strings) {
if (!first) {
sb.append(",");
} else {
first = false;
}
sb.append(str);
}
return sb.toString();
}
}
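/*
 * A minimal usage sketch (illustrative addition, not part of the original
 * file); the user and group names are made up.
 */
class AccessControlListUsageSketch {
  public static void main(String[] args) {
    // "user1,user2 group1,group2": users first, then a space, then groups.
    AccessControlList acl = new AccessControlList("alice,bob admins");
    org.apache.hadoop.security.UserGroupInformation carol =
        org.apache.hadoop.security.UserGroupInformation.createUserForTesting(
            "carol", new String[] {"admins"});
    System.out.println(acl.isUserAllowed(carol)); // true: carol is in "admins"
    acl.addUser("dave");
    // Round-trips through the constructor; element order is not guaranteed.
    System.out.println(acl.getAclString()); // e.g. "alice,bob,dave admins"
  }
}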
| 10,248 | 27.390582 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyServers.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.HashSet;
import org.apache.hadoop.conf.Configuration;
public class ProxyServers {
public static final String CONF_HADOOP_PROXYSERVERS = "hadoop.proxyservers";
private static volatile Collection<String> proxyServers;
public static void refresh() {
refresh(new Configuration());
}
public static void refresh(Configuration conf) {
Collection<String> tempServers = new HashSet<String>();
// trusted proxy servers such as http proxies
for (String host : conf.getTrimmedStrings(CONF_HADOOP_PROXYSERVERS)) {
InetSocketAddress addr = new InetSocketAddress(host, 0);
if (!addr.isUnresolved()) {
tempServers.add(addr.getAddress().getHostAddress());
}
}
proxyServers = tempServers;
}
public static boolean isProxyServer(String remoteAddr) {
if (proxyServers == null) {
refresh();
}
return proxyServers.contains(remoteAddr);
}
}
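/*
 * A minimal usage sketch (illustrative addition, not part of the original
 * file). Note that refresh() stores resolved IP addresses, so lookups must
 * use the address, not the host name.
 */
class ProxyServersUsageSketch {
  public static void main(String[] args) {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS, "localhost");
    ProxyServers.refresh(conf);
    System.out.println(ProxyServers.isProxyServer("127.0.0.1")); // true
  }
}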
| 1,843 | 33.148148 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/PolicyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import java.security.Policy;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* {@link PolicyProvider} provides the {@link Service} definitions to the
* security {@link Policy} in effect for Hadoop.
*
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public abstract class PolicyProvider {
/**
* Configuration key for the {@link PolicyProvider} implementation.
*/
public static final String POLICY_PROVIDER_CONFIG =
"hadoop.security.authorization.policyprovider";
/**
* A default {@link PolicyProvider} without any defined services.
*/
public static final PolicyProvider DEFAULT_POLICY_PROVIDER =
new PolicyProvider() {
@Override
public Service[] getServices() {
return null;
}
};
/**
* Get the {@link Service} definitions from the {@link PolicyProvider}.
* @return the {@link Service} definitions
*/
public abstract Service[] getServices();
}
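/*
 * A minimal implementation sketch (illustrative addition, not part of the
 * original file). It assumes the Service(String aclKey, Class protocol)
 * constructor from this package; the ACL key and protocol are made up.
 */
class SketchPolicyProvider extends PolicyProvider {
  interface MyProtocol { } // stand-in for a real RPC protocol interface
  @Override
  public Service[] getServices() {
    // Each Service ties an ACL configuration key to a protocol class.
    return new Service[] {
        new Service("security.my.protocol.acl", MyProtocol.class)
    };
  }
}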
| 1,886 | 32.105263 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ImpersonationProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.security.UserGroupInformation;
@InterfaceStability.Unstable
@InterfaceAudience.Public
public interface ImpersonationProvider extends Configurable {
/**
* Specifies the configuration prefix for the proxy user properties and
* initializes the provider.
*
* @param configurationPrefix the configuration prefix for the proxy user
* properties
*/
public void init(String configurationPrefix);
/**
* Authorize the superuser that is performing a doAs call.
*
* @param user ugi of the effective or proxy user which contains a real user
* @param remoteAddress the ip address of client
* @throws AuthorizationException
*/
public void authorize(UserGroupInformation user, String remoteAddress)
throws AuthorizationException;
}
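/*
 * A minimal implementation sketch (illustrative addition, not part of the
 * original file): a provider that permits every impersonation attempt.
 * Real deployments should use a restrictive provider such as
 * DefaultImpersonationProvider.
 */
class AllowAllImpersonationProvider implements ImpersonationProvider {
  private org.apache.hadoop.conf.Configuration conf;
  @Override
  public void init(String configurationPrefix) {
    // Nothing to read for this permissive sketch.
  }
  @Override
  public void authorize(org.apache.hadoop.security.UserGroupInformation user,
      String remoteAddress) {
    // Intentionally never throws AuthorizationException.
  }
  @Override
  public void setConf(org.apache.hadoop.conf.Configuration configuration) {
    this.conf = configuration;
  }
  @Override
  public org.apache.hadoop.conf.Configuration getConf() {
    return conf;
  }
}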
| 1,802 | 35.795918 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import java.io.IOException;
import java.net.InetAddress;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.MachineList;
import com.google.common.annotations.VisibleForTesting;
/**
* An authorization manager which handles service-level authorization
* for incoming service requests.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class ServiceAuthorizationManager {
static final String BLOCKED = ".blocked";
static final String HOSTS = ".hosts";
private static final String HADOOP_POLICY_FILE = "hadoop-policy.xml";
// For each class, first ACL in the array specifies the allowed entries
// and second ACL specifies blocked entries.
private volatile Map<Class<?>, AccessControlList[]> protocolToAcls =
new IdentityHashMap<Class<?>, AccessControlList[]>();
// For each class, first MachineList in the array specifies the allowed entries
// and second MachineList specifies blocked entries.
private volatile Map<Class<?>, MachineList[]> protocolToMachineLists =
new IdentityHashMap<Class<?>, MachineList[]>();
/**
* Configuration key for controlling service-level authorization for Hadoop.
*
* @deprecated Use
* {@link CommonConfigurationKeys#HADOOP_SECURITY_AUTHORIZATION}
* instead.
*/
@Deprecated
public static final String SERVICE_AUTHORIZATION_CONFIG =
"hadoop.security.authorization";
public static final Log AUDITLOG =
LogFactory.getLog("SecurityLogger."+ServiceAuthorizationManager.class.getName());
private static final String AUTHZ_SUCCESSFUL_FOR = "Authorization successful for ";
private static final String AUTHZ_FAILED_FOR = "Authorization failed for ";
/**
* Authorize the user to access the protocol being used.
*
* @param user user accessing the service
* @param protocol service being accessed
* @param conf configuration to use
* @param addr InetAddress of the client
* @throws AuthorizationException on authorization failure
*/
public void authorize(UserGroupInformation user,
Class<?> protocol,
Configuration conf,
InetAddress addr
) throws AuthorizationException {
AccessControlList[] acls = protocolToAcls.get(protocol);
MachineList[] hosts = protocolToMachineLists.get(protocol);
if (acls == null || hosts == null) {
throw new AuthorizationException("Protocol " + protocol +
" is not known.");
}
// get client principal key to verify (if available)
KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol, conf);
String clientPrincipal = null;
if (krbInfo != null) {
String clientKey = krbInfo.clientPrincipal();
if (clientKey != null && !clientKey.isEmpty()) {
try {
clientPrincipal = SecurityUtil.getServerPrincipal(
conf.get(clientKey), addr);
} catch (IOException e) {
throw (AuthorizationException) new AuthorizationException(
"Can't figure out Kerberos principal name for connection from "
+ addr + " for user=" + user + " protocol=" + protocol)
.initCause(e);
}
}
}
if((clientPrincipal != null && !clientPrincipal.equals(user.getUserName())) ||
acls.length != 2 || !acls[0].isUserAllowed(user) || acls[1].isUserAllowed(user)) {
AUDITLOG.warn(AUTHZ_FAILED_FOR + user + " for protocol=" + protocol
+ ", expected client Kerberos principal is " + clientPrincipal);
throw new AuthorizationException("User " + user +
" is not authorized for protocol " + protocol +
", expected client Kerberos principal is " + clientPrincipal);
}
if (addr != null) {
String hostAddress = addr.getHostAddress();
if (hosts.length != 2 || !hosts[0].includes(hostAddress) ||
hosts[1].includes(hostAddress)) {
AUDITLOG.warn(AUTHZ_FAILED_FOR + "host " + hostAddress
+ " for protocol=" + protocol);
throw new AuthorizationException("Host " + hostAddress +
" is not authorized for protocol " + protocol);
}
}
AUDITLOG.info(AUTHZ_SUCCESSFUL_FOR + user + " for protocol=" + protocol);
}
public void refresh(Configuration conf,
PolicyProvider provider) {
// Get the system property 'hadoop.policy.file'
String policyFile =
System.getProperty("hadoop.policy.file", HADOOP_POLICY_FILE);
// Make a copy of the original config, and load the policy file
Configuration policyConf = new Configuration(conf);
policyConf.addResource(policyFile);
refreshWithLoadedConfiguration(policyConf, provider);
}
@Private
public void refreshWithLoadedConfiguration(Configuration conf,
PolicyProvider provider) {
final Map<Class<?>, AccessControlList[]> newAcls =
new IdentityHashMap<Class<?>, AccessControlList[]>();
final Map<Class<?>, MachineList[]> newMachineLists =
new IdentityHashMap<Class<?>, MachineList[]>();
String defaultAcl = conf.get(
CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL,
AccessControlList.WILDCARD_ACL_VALUE);
String defaultBlockedAcl = conf.get(
CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_BLOCKED_ACL, "");
String defaultServiceHostsKey = getHostKey(
CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL);
String defaultMachineList = conf.get(defaultServiceHostsKey,
MachineList.WILDCARD_VALUE);
String defaultBlockedMachineList = conf.get(
defaultServiceHostsKey + BLOCKED, "");
// Parse the config file
Service[] services = provider.getServices();
if (services != null) {
for (Service service : services) {
AccessControlList acl =
new AccessControlList(
conf.get(service.getServiceKey(),
defaultAcl)
);
AccessControlList blockedAcl =
new AccessControlList(
conf.get(service.getServiceKey() + BLOCKED,
defaultBlockedAcl));
newAcls.put(service.getProtocol(), new AccessControlList[] {acl, blockedAcl});
String serviceHostsKey = getHostKey(service.getServiceKey());
MachineList machineList = new MachineList(conf.get(serviceHostsKey, defaultMachineList));
MachineList blockedMachineList = new MachineList(
conf.get(serviceHostsKey + BLOCKED, defaultBlockedMachineList));
newMachineLists.put(service.getProtocol(),
new MachineList[] {machineList, blockedMachineList});
}
}
// Flip to the newly parsed permissions
protocolToAcls = newAcls;
protocolToMachineLists = newMachineLists;
}
private String getHostKey(String serviceKey) {
int endIndex = serviceKey.lastIndexOf(".");
if (endIndex != -1) {
return serviceKey.substring(0, endIndex) + HOSTS;
}
return serviceKey;
}
@VisibleForTesting
public Set<Class<?>> getProtocolsWithAcls() {
return protocolToAcls.keySet();
}
@VisibleForTesting
public AccessControlList getProtocolsAcls(Class<?> className) {
return protocolToAcls.get(className)[0];
}
@VisibleForTesting
public AccessControlList getProtocolsBlockedAcls(Class<?> className) {
return protocolToAcls.get(className)[1];
}
@VisibleForTesting
public Set<Class<?>> getProtocolsWithMachineLists() {
return protocolToMachineLists.keySet();
}
@VisibleForTesting
public MachineList getProtocolsMachineList(Class<?> className) {
return protocolToMachineLists.get(className)[0];
}
@VisibleForTesting
public MachineList getProtocolsBlockedMachineList(Class<?> className) {
return protocolToMachineLists.get(className)[1];
}
}
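/*
 * A minimal usage sketch (illustrative addition, not part of the original
 * file). The ACL key, protocol, and user are made up; a real PolicyProvider
 * would come from the server being secured.
 */
class ServiceAuthorizationManagerUsageSketch {
  interface PingProtocol { } // stand-in RPC protocol
  public static void main(String[] args) throws Exception {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    conf.set("security.ping.protocol.acl", "alice"); // only alice may call
    PolicyProvider provider = new PolicyProvider() {
      @Override
      public Service[] getServices() {
        return new Service[] {
            new Service("security.ping.protocol.acl", PingProtocol.class)
        };
      }
    };
    ServiceAuthorizationManager sam = new ServiceAuthorizationManager();
    sam.refreshWithLoadedConfiguration(conf, provider);
    org.apache.hadoop.security.UserGroupInformation alice =
        org.apache.hadoop.security.UserGroupInformation.createRemoteUser("alice");
    // Throws AuthorizationException for callers not covered by the ACL.
    sam.authorize(alice, PingProtocol.class, conf, null);
  }
}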
| 9,392 | 38.466387 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol which is used to refresh the authorization policy in use currently.
*/
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public interface RefreshAuthorizationPolicyProtocol {
/**
* Version 1: Initial version
*/
public static final long versionID = 1L;
/**
* Refresh the service-level authorization policy in-effect.
* @throws IOException
*/
@Idempotent
void refreshServiceAcl() throws IOException;
}
| 1,730 | 34.326531 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.alias;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
/**
* A CredentialProvider for UGIs. It uses the credentials object associated
* with the current user to find credentials. This provider is created using a
* URI of "user:///".
*/
@InterfaceAudience.Private
public class UserProvider extends CredentialProvider {
public static final String SCHEME_NAME = "user";
private final UserGroupInformation user;
private final Credentials credentials;
private UserProvider() throws IOException {
user = UserGroupInformation.getCurrentUser();
credentials = user.getCredentials();
}
@Override
public boolean isTransient() {
return true;
}
@Override
public CredentialEntry getCredentialEntry(String alias) {
byte[] bytes = credentials.getSecretKey(new Text(alias));
if (bytes == null) {
return null;
}
return new CredentialEntry(
alias, new String(bytes, Charsets.UTF_8).toCharArray());
}
@Override
public CredentialEntry createCredentialEntry(String name, char[] credential)
throws IOException {
Text nameT = new Text(name);
if (credentials.getSecretKey(nameT) != null) {
throw new IOException("Credential " + name +
" already exists in " + this);
}
credentials.addSecretKey(nameT,
new String(credential).getBytes(Charsets.UTF_8));
return new CredentialEntry(name, credential);
}
@Override
public void deleteCredentialEntry(String name) throws IOException {
byte[] cred = credentials.getSecretKey(new Text(name));
if (cred != null) {
credentials.removeSecretKey(new Text(name));
}
else {
throw new IOException("Credential " + name +
" does not exist in " + this);
}
}
@Override
public String toString() {
return SCHEME_NAME + ":///";
}
@Override
public void flush() {
user.addCredentials(credentials);
}
public static class Factory extends CredentialProviderFactory {
@Override
public CredentialProvider createProvider(URI providerName,
Configuration conf) throws IOException {
if (SCHEME_NAME.equals(providerName.getScheme())) {
return new UserProvider();
}
return null;
}
}
@Override
public List<String> getAliases() throws IOException {
List<String> list = new ArrayList<String>();
List<Text> aliases = credentials.getAllSecretKeys();
for (Text key : aliases) {
list.add(key.toString());
}
return list;
}
}
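/*
 * A minimal usage sketch (illustrative addition, not part of the original
 * file); the alias and secret are made up.
 */
class UserProviderUsageSketch {
  public static void main(String[] args) throws Exception {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "user:///");
    CredentialProvider provider =
        CredentialProviderFactory.getProviders(conf).get(0);
    provider.createCredentialEntry("db.password", "s3cret".toCharArray());
    provider.flush(); // pushes the credentials back into the current UGI
    System.out.println(new String(
        provider.getCredentialEntry("db.password").getCredential()));
  }
}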
| 3,701 | 29.595041 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.alias;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.ProviderUtils;
import com.google.common.base.Charsets;
import javax.crypto.spec.SecretKeySpec;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URL;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.UnrecoverableKeyException;
import java.security.cert.CertificateException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* Abstract class for implementing credential providers that are based on
* Java Keystores as the underlying credential store.
*
* The password for the keystore is taken from the HADOOP_CREDSTORE_PASSWORD
* environment variable, falling back to a password file named by the
* hadoop.security.credstore.java-keystore-provider.password-file property,
* and finally to the default 'none'.
*
* It is expected that code needing access to a credential-protected resource
* will copy the creds from the original provider into the job's Credentials
* object, which is accessed via the UserProvider. Therefore, these providers
* won't be directly used by MapReduce tasks.
*/
@InterfaceAudience.Private
public abstract class AbstractJavaKeyStoreProvider extends CredentialProvider {
public static final Log LOG = LogFactory.getLog(
AbstractJavaKeyStoreProvider.class);
public static final String CREDENTIAL_PASSWORD_NAME =
"HADOOP_CREDSTORE_PASSWORD";
public static final String KEYSTORE_PASSWORD_FILE_KEY =
"hadoop.security.credstore.java-keystore-provider.password-file";
public static final String KEYSTORE_PASSWORD_DEFAULT = "none";
private Path path;
private final URI uri;
private final KeyStore keyStore;
private char[] password = null;
private boolean changed = false;
private Lock readLock;
private Lock writeLock;
protected AbstractJavaKeyStoreProvider(URI uri, Configuration conf)
throws IOException {
this.uri = uri;
initFileSystem(uri, conf);
// Get the password from the user's environment
if (System.getenv().containsKey(CREDENTIAL_PASSWORD_NAME)) {
password = System.getenv(CREDENTIAL_PASSWORD_NAME).toCharArray();
}
// if not in the environment, check for a password file
if (password == null) {
String pwFile = conf.get(KEYSTORE_PASSWORD_FILE_KEY);
if (pwFile != null) {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL pwdFile = cl.getResource(pwFile);
if (pwdFile != null) {
try (InputStream is = pwdFile.openStream()) {
password = IOUtils.toString(is).trim().toCharArray();
}
}
}
}
if (password == null) {
password = KEYSTORE_PASSWORD_DEFAULT.toCharArray();
}
try {
keyStore = KeyStore.getInstance("jceks");
if (keystoreExists()) {
stashOriginalFilePermissions();
try (InputStream in = getInputStreamForFile()) {
keyStore.load(in, password);
}
} else {
createPermissions("700");
// required to create an empty keystore. *sigh*
keyStore.load(null, password);
}
} catch (KeyStoreException e) {
throw new IOException("Can't create keystore", e);
} catch (NoSuchAlgorithmException e) {
throw new IOException("Can't load keystore " + getPathAsString(), e);
} catch (CertificateException e) {
throw new IOException("Can't load keystore " + getPathAsString(), e);
}
ReadWriteLock lock = new ReentrantReadWriteLock(true);
readLock = lock.readLock();
writeLock = lock.writeLock();
}
public Path getPath() {
return path;
}
public void setPath(Path p) {
this.path = p;
}
public char[] getPassword() {
return password;
}
public void setPassword(char[] pass) {
this.password = pass;
}
public boolean isChanged() {
return changed;
}
public void setChanged(boolean chg) {
this.changed = chg;
}
public Lock getReadLock() {
return readLock;
}
public void setReadLock(Lock rl) {
this.readLock = rl;
}
public Lock getWriteLock() {
return writeLock;
}
public void setWriteLock(Lock wl) {
this.writeLock = wl;
}
public URI getUri() {
return uri;
}
public KeyStore getKeyStore() {
return keyStore;
}
protected final String getPathAsString() {
return getPath().toString();
}
protected abstract String getSchemeName();
protected abstract OutputStream getOutputStreamForKeystore()
throws IOException;
protected abstract boolean keystoreExists() throws IOException;
protected abstract InputStream getInputStreamForFile() throws IOException;
protected abstract void createPermissions(String perms) throws IOException;
protected abstract void stashOriginalFilePermissions() throws IOException;
protected void initFileSystem(URI keystoreUri, Configuration conf)
throws IOException {
path = ProviderUtils.unnestUri(keystoreUri);
if (LOG.isDebugEnabled()) {
LOG.debug("backing jks path initialized to " + path);
}
}
@Override
public CredentialEntry getCredentialEntry(String alias)
throws IOException {
readLock.lock();
try {
SecretKeySpec key = null;
try {
if (!keyStore.containsAlias(alias)) {
return null;
}
key = (SecretKeySpec) keyStore.getKey(alias, password);
} catch (KeyStoreException e) {
throw new IOException("Can't get credential " + alias + " from "
+ getPathAsString(), e);
} catch (NoSuchAlgorithmException e) {
throw new IOException("Can't get algorithm for credential " + alias
+ " from " + getPathAsString(), e);
} catch (UnrecoverableKeyException e) {
throw new IOException("Can't recover credential " + alias + " from "
+ getPathAsString(), e);
}
return new CredentialEntry(alias, bytesToChars(key.getEncoded()));
} finally {
readLock.unlock();
}
}
public static char[] bytesToChars(byte[] bytes) throws IOException {
String pass;
pass = new String(bytes, Charsets.UTF_8);
return pass.toCharArray();
}
@Override
public List<String> getAliases() throws IOException {
readLock.lock();
try {
ArrayList<String> list = new ArrayList<String>();
String alias = null;
try {
Enumeration<String> e = keyStore.aliases();
while (e.hasMoreElements()) {
alias = e.nextElement();
list.add(alias);
}
} catch (KeyStoreException e) {
throw new IOException("Can't get alias " + alias + " from "
+ getPathAsString(), e);
}
return list;
} finally {
readLock.unlock();
}
}
@Override
public CredentialEntry createCredentialEntry(String alias, char[] credential)
throws IOException {
writeLock.lock();
try {
if (keyStore.containsAlias(alias)) {
throw new IOException("Credential " + alias + " already exists in "
+ this);
}
return innerSetCredential(alias, credential);
} catch (KeyStoreException e) {
throw new IOException("Problem looking up credential " + alias + " in "
+ this, e);
} finally {
writeLock.unlock();
}
}
@Override
public void deleteCredentialEntry(String name) throws IOException {
writeLock.lock();
try {
try {
if (keyStore.containsAlias(name)) {
keyStore.deleteEntry(name);
} else {
throw new IOException("Credential " + name + " does not exist in "
+ this);
}
} catch (KeyStoreException e) {
throw new IOException("Problem removing " + name + " from " + this, e);
}
changed = true;
} finally {
writeLock.unlock();
}
}
CredentialEntry innerSetCredential(String alias, char[] material)
throws IOException {
writeLock.lock();
try {
keyStore.setKeyEntry(alias,
new SecretKeySpec(new String(material).getBytes("UTF-8"), "AES"),
password, null);
} catch (KeyStoreException e) {
throw new IOException("Can't store credential " + alias + " in " + this,
e);
} finally {
writeLock.unlock();
}
changed = true;
return new CredentialEntry(alias, material);
}
@Override
public void flush() throws IOException {
writeLock.lock();
try {
if (!changed) {
LOG.debug("Keystore hasn't changed, returning.");
return;
}
LOG.debug("Writing out keystore.");
try (OutputStream out = getOutputStreamForKeystore()) {
keyStore.store(out, password);
} catch (KeyStoreException e) {
throw new IOException("Can't store keystore " + this, e);
} catch (NoSuchAlgorithmException e) {
throw new IOException("No such algorithm storing keystore " + this, e);
} catch (CertificateException e) {
throw new IOException("Certificate exception storing keystore " + this,
e);
}
changed = false;
} finally {
writeLock.unlock();
}
}
@Override
public String toString() {
return uri.toString();
}
}
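/*
 * A minimal configuration sketch of the password-resolution order
 * implemented above (illustrative addition, not part of the original file):
 * the HADOOP_CREDSTORE_PASSWORD environment variable wins; otherwise the
 * classpath resource named by the password-file property is read; otherwise
 * the literal default "none" is used. The resource name is made up.
 */
class KeystorePasswordConfigSketch {
  public static void main(String[] args) {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    // Only consulted when HADOOP_CREDSTORE_PASSWORD is absent from the env;
    // the file is located through the thread context classloader.
    conf.set(AbstractJavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,
        "credstore.pwd");
  }
}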
| 10,389 | 29.558824 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProviderFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.alias;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* A factory to create a list of CredentialProvider based on the path given in a
* Configuration. It uses a service loader interface to find the available
* CredentialProviders and create them based on the list of URIs.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public abstract class CredentialProviderFactory {
public static final String CREDENTIAL_PROVIDER_PATH =
"hadoop.security.credential.provider.path";
public abstract CredentialProvider createProvider(URI providerName,
Configuration conf
) throws IOException;
private static final ServiceLoader<CredentialProviderFactory> serviceLoader =
ServiceLoader.load(CredentialProviderFactory.class);
public static List<CredentialProvider> getProviders(Configuration conf
) throws IOException {
List<CredentialProvider> result = new ArrayList<CredentialProvider>();
for(String path: conf.getStringCollection(CREDENTIAL_PROVIDER_PATH)) {
try {
URI uri = new URI(path);
boolean found = false;
for(CredentialProviderFactory factory: serviceLoader) {
CredentialProvider kp = factory.createProvider(uri, conf);
if (kp != null) {
result.add(kp);
found = true;
break;
}
}
if (!found) {
throw new IOException("No CredentialProviderFactory for " + uri + " in " +
CREDENTIAL_PROVIDER_PATH);
}
} catch (URISyntaxException error) {
throw new IOException("Bad configuration of " + CREDENTIAL_PROVIDER_PATH +
" at " + path, error);
}
}
return result;
}
}
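/*
 * A minimal usage sketch (illustrative addition, not part of the original
 * file): resolving every provider named in the configured path. The paths
 * are illustrative.
 */
class CredentialProviderFactoryUsageSketch {
  public static void main(String[] args) throws Exception {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        "user:///,jceks://file/tmp/creds.jceks");
    for (CredentialProvider p : CredentialProviderFactory.getProviders(conf)) {
      System.out.println(p + " transient=" + p.isTransient());
    }
  }
}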
| 2,952 | 37.350649 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.alias;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A provider of credentials or password for Hadoop applications. Provides an
* abstraction to separate credential storage from users of them. It
* is intended to support getting or storing passwords in a variety of ways,
* including third party bindings.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public abstract class CredentialProvider {
public static final String CLEAR_TEXT_FALLBACK
= "hadoop.security.credential.clear-text-fallback";
/**
* The combination of both the alias and the actual credential value.
*/
public static class CredentialEntry {
private final String alias;
private final char[] credential;
protected CredentialEntry(String alias,
char[] credential) {
this.alias = alias;
this.credential = credential;
}
public String getAlias() {
return alias;
}
public char[] getCredential() {
return credential;
}
public String toString() {
StringBuilder buf = new StringBuilder();
buf.append("alias(");
buf.append(alias);
buf.append(")=");
if (credential == null) {
buf.append("null");
} else {
for(char c: credential) {
buf.append(c);
}
}
return buf.toString();
}
}
/**
* Indicates whether this provider represents a store
* that is intended for transient use - such as the UserProvider
* is. These providers are generally used to provide job access to
* passwords rather than for long term storage.
* @return true if transient, false otherwise
*/
public boolean isTransient() {
return false;
}
/**
* Ensures that any changes to the credentials are written to persistent store.
* @throws IOException
*/
public abstract void flush() throws IOException;
/**
* Get the credential entry for a specific alias.
* @param alias the name of a specific credential
* @return the credentialEntry
* @throws IOException
*/
public abstract CredentialEntry getCredentialEntry(String alias)
throws IOException;
/**
* Get the aliases for all credentials.
* @return the list of alias names
* @throws IOException
*/
public abstract List<String> getAliases() throws IOException;
/**
* Create a new credential. The given alias must not already exist.
* @param name the alias of the credential
* @param credential the credential value for the alias.
* @throws IOException
*/
public abstract CredentialEntry createCredentialEntry(String name,
char[] credential) throws IOException;
/**
* Delete the given credential.
* @param name the alias of the credential to delete
* @throws IOException
*/
public abstract void deleteCredentialEntry(String name) throws IOException;
}
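/*
 * A minimal implementation sketch (illustrative addition, not part of the
 * original file): an in-memory, transient provider honoring the contracts
 * documented above (unique alias on create, error when deleting a missing
 * alias).
 */
class InMemoryCredentialProviderSketch extends CredentialProvider {
  private final java.util.Map<String, char[]> store =
      new java.util.HashMap<String, char[]>();
  @Override
  public boolean isTransient() {
    return true;
  }
  @Override
  public void flush() {
    // Nothing persistent to write.
  }
  @Override
  public CredentialEntry getCredentialEntry(String alias) {
    char[] cred = store.get(alias);
    return cred == null ? null : new CredentialEntry(alias, cred);
  }
  @Override
  public java.util.List<String> getAliases() {
    return new java.util.ArrayList<String>(store.keySet());
  }
  @Override
  public CredentialEntry createCredentialEntry(String name, char[] credential)
      throws java.io.IOException {
    if (store.containsKey(name)) {
      throw new java.io.IOException("Credential " + name + " already exists");
    }
    store.put(name, credential.clone());
    return new CredentialEntry(name, credential);
  }
  @Override
  public void deleteCredentialEntry(String name) throws java.io.IOException {
    if (store.remove(name) == null) {
      throw new java.io.IOException("Credential " + name + " does not exist");
    }
  }
}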
| 3,833 | 29.672 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.alias;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
/**
* CredentialProvider based on Java's KeyStore file format. The file may be
* stored in any Hadoop FileSystem using the following name mangling:
* jceks://hdfs@nn1.example.com/my/creds.jceks -> hdfs://nn1.example.com/my/creds.jceks
* jceks://file/home/larry/creds.jceks -> file:///home/larry/creds.jceks
*/
@InterfaceAudience.Private
public class JavaKeyStoreProvider extends AbstractJavaKeyStoreProvider {
public static final String SCHEME_NAME = "jceks";
private FileSystem fs;
private FsPermission permissions;
private JavaKeyStoreProvider(URI uri, Configuration conf)
throws IOException {
super(uri, conf);
}
@Override
protected String getSchemeName() {
return SCHEME_NAME;
}
@Override
protected OutputStream getOutputStreamForKeystore() throws IOException {
FSDataOutputStream out = FileSystem.create(fs, getPath(), permissions);
return out;
}
@Override
protected boolean keystoreExists() throws IOException {
return fs.exists(getPath());
}
@Override
protected InputStream getInputStreamForFile() throws IOException {
return fs.open(getPath());
}
@Override
protected void createPermissions(String perms) {
permissions = new FsPermission(perms);
}
@Override
protected void stashOriginalFilePermissions() throws IOException {
// save off permissions in case we need to
// rewrite the keystore in flush()
FileStatus s = fs.getFileStatus(getPath());
permissions = s.getPermission();
}
protected void initFileSystem(URI uri, Configuration conf)
throws IOException {
super.initFileSystem(uri, conf);
fs = getPath().getFileSystem(conf);
}
/**
* The factory to create JksProviders, which is used by the ServiceLoader.
*/
public static class Factory extends CredentialProviderFactory {
@Override
public CredentialProvider createProvider(URI providerName,
Configuration conf) throws IOException {
if (SCHEME_NAME.equals(providerName.getScheme())) {
return new JavaKeyStoreProvider(providerName, conf);
}
return null;
}
}
}
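/*
 * A minimal end-to-end sketch (illustrative addition, not part of the
 * original file); the keystore path and alias are made up, and the keystore
 * password comes from HADOOP_CREDSTORE_PASSWORD, a configured password
 * file, or the default.
 */
class JavaKeyStoreProviderUsageSketch {
  public static void main(String[] args) throws Exception {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        "jceks://file/tmp/creds.jceks");
    CredentialProvider provider =
        CredentialProviderFactory.getProviders(conf).get(0);
    provider.createCredentialEntry("ssl.keystore.password",
        "changeit".toCharArray());
    provider.flush(); // persists /tmp/creds.jceks (created 700 if new)
    System.out.println(provider.getAliases());
  }
}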
| 3,355 | 30.660377 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.alias;
import java.io.Console;
import java.io.IOException;
import java.io.PrintStream;
import java.security.InvalidParameterException;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* This program is the CLI utility for the CredentialProvider facilities in
* Hadoop.
*/
public class CredentialShell extends Configured implements Tool {
final static private String USAGE_PREFIX = "Usage: hadoop credential " +
"[generic options]\n";
final static private String COMMANDS =
" [--help]\n" +
" [" + CreateCommand.USAGE + "]\n" +
" [" + DeleteCommand.USAGE + "]\n" +
" [" + ListCommand.USAGE + "]\n";
private boolean interactive = true;
private Command command = null;
/** allows stdout to be captured if necessary */
public PrintStream out = System.out;
/** allows stderr to be captured if necessary */
public PrintStream err = System.err;
private boolean userSuppliedProvider = false;
private String value = null;
private PasswordReader passwordReader;
@Override
public int run(String[] args) throws Exception {
int exitCode = 0;
try {
exitCode = init(args);
if (exitCode != 0) {
return exitCode;
}
if (command.validate()) {
command.execute();
} else {
exitCode = 1;
}
} catch (Exception e) {
e.printStackTrace(err);
return 1;
}
return exitCode;
}
/**
* Parse the command line arguments and initialize the data
* <pre>
* % hadoop credential create alias [-provider providerPath]
* % hadoop credential list [-provider providerPath]
* % hadoop credential delete alias [-provider providerPath] [-f]
* </pre>
* @param args
* @return 0 if the argument(s) were recognized, 1 otherwise
* @throws IOException
*/
protected int init(String[] args) throws IOException {
// no args should print the help message
if (0 == args.length) {
printCredShellUsage();
ToolRunner.printGenericCommandUsage(System.err);
return 1;
}
for (int i = 0; i < args.length; i++) { // parse command line
if (args[i].equals("create")) {
if (i == args.length - 1) {
printCredShellUsage();
return 1;
}
String alias = args[++i];
command = new CreateCommand(alias);
if (alias.equals("-help")) {
printCredShellUsage();
return 0;
}
} else if (args[i].equals("delete")) {
if (i == args.length - 1) {
printCredShellUsage();
return 1;
}
String alias = args[++i];
command = new DeleteCommand(alias);
if (alias.equals("-help")) {
printCredShellUsage();
return 0;
}
} else if (args[i].equals("list")) {
command = new ListCommand();
} else if (args[i].equals("-provider")) {
if (i == args.length - 1) {
printCredShellUsage();
return 1;
}
userSuppliedProvider = true;
getConf().set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
args[++i]);
} else if (args[i].equals("-f") || (args[i].equals("-force"))) {
interactive = false;
} else if (args[i].equals("-v") || (args[i].equals("-value"))) {
if (i == args.length - 1) {
printCredShellUsage();
return 1;
}
value = args[++i];
} else if (args[i].equals("-help")) {
printCredShellUsage();
return 0;
} else {
printCredShellUsage();
ToolRunner.printGenericCommandUsage(System.err);
return 1;
}
}
return 0;
}
private void printCredShellUsage() {
out.println(USAGE_PREFIX + COMMANDS);
if (command != null) {
out.println(command.getUsage());
}
else {
out.println("=========================================================" +
"======");
out.println(CreateCommand.USAGE + ":\n\n" + CreateCommand.DESC);
out.println("=========================================================" +
"======");
out.println(DeleteCommand.USAGE + ":\n\n" + DeleteCommand.DESC);
out.println("=========================================================" +
"======");
out.println(ListCommand.USAGE + ":\n\n" + ListCommand.DESC);
}
}
private abstract class Command {
protected CredentialProvider provider = null;
public boolean validate() {
return true;
}
protected CredentialProvider getCredentialProvider() {
CredentialProvider provider = null;
List<CredentialProvider> providers;
try {
providers = CredentialProviderFactory.getProviders(getConf());
if (userSuppliedProvider) {
provider = providers.get(0);
}
else {
for (CredentialProvider p : providers) {
if (!p.isTransient()) {
provider = p;
break;
}
}
}
} catch (IOException e) {
e.printStackTrace(err);
}
return provider;
}
protected void printProviderWritten() {
out.println(provider.getClass().getName() + " has been updated.");
}
protected void warnIfTransientProvider() {
if (provider.isTransient()) {
out.println("WARNING: you are modifying a transient provider.");
}
}
public abstract void execute() throws Exception;
public abstract String getUsage();
}
private class ListCommand extends Command {
public static final String USAGE = "list [-provider provider-path]";
public static final String DESC =
"The list subcommand displays the aliases contained within \n" +
"a particular provider - as configured in core-site.xml or " +
"indicated\nthrough the -provider argument.";
public boolean validate() {
boolean rc = true;
provider = getCredentialProvider();
if (provider == null) {
out.println("There are no non-transient CredentialProviders configured.\n"
+ "Consider using the -provider option to indicate the provider\n"
+ "to use. If you want to list a transient provider then you\n"
+ "you MUST use the -provider argument.");
rc = false;
}
return rc;
}
public void execute() throws IOException {
List<String> aliases;
try {
aliases = provider.getAliases();
out.println("Listing aliases for CredentialProvider: " + provider.toString());
for (String alias : aliases) {
out.println(alias);
}
} catch (IOException e) {
out.println("Cannot list aliases for CredentialProvider: " + provider.toString()
+ ": " + e.getMessage());
throw e;
}
}
@Override
public String getUsage() {
return USAGE + ":\n\n" + DESC;
}
}
private class DeleteCommand extends Command {
public static final String USAGE =
"delete <alias> [-f] [-provider provider-path]";
public static final String DESC =
"The delete subcommand deletes the credential\n" +
"specified as the <alias> argument from within the provider\n" +
"indicated through the -provider argument. The command asks for\n" +
"confirmation unless the -f option is specified.";
String alias = null;
boolean cont = true;
public DeleteCommand(String alias) {
this.alias = alias;
}
@Override
public boolean validate() {
provider = getCredentialProvider();
if (provider == null) {
out.println("There are no valid CredentialProviders configured.\n"
+ "Nothing will be deleted.\n"
+ "Consider using the -provider option to indicate the provider"
+ " to use.");
return false;
}
if (alias == null) {
out.println("There is no alias specified. Please provide the" +
"mandatory <alias>. See the usage description with -help.");
return false;
}
if (interactive) {
try {
cont = ToolRunner
.confirmPrompt("You are about to DELETE the credential " +
alias + " from CredentialProvider " + provider.toString() +
". Continue? ");
if (!cont) {
out.println("Nothing has been be deleted.");
}
return cont;
} catch (IOException e) {
out.println(alias + " will not be deleted.");
e.printStackTrace(err);
}
}
return true;
}
public void execute() throws IOException {
warnIfTransientProvider();
out.println("Deleting credential: " + alias + " from CredentialProvider: "
+ provider.toString());
if (cont) {
try {
provider.deleteCredentialEntry(alias);
out.println(alias + " has been successfully deleted.");
provider.flush();
printProviderWritten();
} catch (IOException e) {
out.println(alias + " has NOT been deleted.");
throw e;
}
}
}
@Override
public String getUsage() {
return USAGE + ":\n\n" + DESC;
}
}
private class CreateCommand extends Command {
public static final String USAGE =
"create <alias> [-provider provider-path]";
public static final String DESC =
"The create subcommand creates a new credential for the name specified\n" +
"as the <alias> argument within the provider indicated through\n" +
"the -provider argument.";
String alias = null;
public CreateCommand(String alias) {
this.alias = alias;
}
public boolean validate() {
boolean rc = true;
provider = getCredentialProvider();
if (provider == null) {
out.println("There are no valid CredentialProviders configured." +
"\nCredential will not be created.\n"
+ "Consider using the -provider option to indicate the provider" +
" to use.");
rc = false;
}
if (alias == null) {
out.println("There is no alias specified. Please provide the" +
"mandatory <alias>. See the usage description with -help.");
rc = false;
}
return rc;
}
public void execute() throws IOException, NoSuchAlgorithmException {
warnIfTransientProvider();
try {
char[] credential = null;
if (value != null) {
// testing only
credential = value.toCharArray();
}
else {
credential = promptForCredential();
}
provider.createCredentialEntry(alias, credential);
out.println(alias + " has been successfully created.");
provider.flush();
printProviderWritten();
} catch (InvalidParameterException e) {
out.println(alias + " has NOT been created. " + e.getMessage());
throw e;
} catch (IOException e) {
out.println(alias + " has NOT been created. " + e.getMessage());
throw e;
}
}
@Override
public String getUsage() {
return USAGE + ":\n\n" + DESC;
}
}
protected char[] promptForCredential() throws IOException {
PasswordReader c = getPasswordReader();
if (c == null) {
throw new IOException("No console available for prompting user.");
}
char[] cred = null;
boolean noMatch;
do {
char[] newPassword1 = c.readPassword("Enter password: ");
char[] newPassword2 = c.readPassword("Enter password again: ");
noMatch = !Arrays.equals(newPassword1, newPassword2);
if (noMatch) {
if (newPassword1 != null) Arrays.fill(newPassword1, ' ');
c.format("Passwords don't match. Try again.%n");
} else {
cred = newPassword1;
}
if (newPassword2 != null) Arrays.fill(newPassword2, ' ');
} while (noMatch);
return cred;
}
public PasswordReader getPasswordReader() {
if (passwordReader == null) {
passwordReader = new PasswordReader();
}
return passwordReader;
}
public void setPasswordReader(PasswordReader reader) {
passwordReader = reader;
}
// to facilitate testing since Console is a final class...
public static class PasswordReader {
public char[] readPassword(String prompt) {
Console console = System.console();
char[] pass = console.readPassword(prompt);
return pass;
}
public void format(String message) {
Console console = System.console();
console.format(message);
}
}
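  // A test-oriented sketch (an assumed helper, not part of the original
  // tool): returns a canned password so create/delete commands can run
  // without a console, via setPasswordReader(new CannedPasswordReader("pw")).
  public static class CannedPasswordReader extends PasswordReader {
    private final char[] canned;

    public CannedPasswordReader(String canned) {
      this.canned = canned.toCharArray();
    }

    @Override
    public char[] readPassword(String prompt) {
      return canned.clone(); // hand out a copy; callers may zero the array
    }

    @Override
    public void format(String message) {
      // swallow prompts and messages in tests
    }
  }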
/**
* Main program.
*
* @param args
* Command line arguments
* @throws Exception
*/
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new Configuration(), new CredentialShell(), args);
System.exit(res);
}
}
| 13,823 | 29.995516 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/LocalJavaKeyStoreProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.alias;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Shell;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.EnumSet;
/**
* CredentialProvider based on Java's KeyStore file format. The file may be
* stored only on the local filesystem using the following name mangling:
* localjceks://file/home/larry/creds.jceks -> file:///home/larry/creds.jceks
*/
@InterfaceAudience.Private
public final class LocalJavaKeyStoreProvider extends
AbstractJavaKeyStoreProvider {
public static final String SCHEME_NAME = "localjceks";
private File file;
private Set<PosixFilePermission> permissions;
private LocalJavaKeyStoreProvider(URI uri, Configuration conf)
throws IOException {
super(uri, conf);
}
@Override
protected String getSchemeName() {
return SCHEME_NAME;
}
@Override
protected OutputStream getOutputStreamForKeystore() throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("using '" + file + "' for output stream.");
}
FileOutputStream out = new FileOutputStream(file);
return out;
}
@Override
protected boolean keystoreExists() throws IOException {
/* The keystore loader doesn't handle zero length files. */
return file.exists() && (file.length() > 0);
}
@Override
protected InputStream getInputStreamForFile() throws IOException {
FileInputStream is = new FileInputStream(file);
return is;
}
@Override
protected void createPermissions(String perms) throws IOException {
    int mode = 0700; // octal default; effectively unused, a parse failure throws below
try {
mode = Integer.parseInt(perms, 8);
} catch (NumberFormatException nfe) {
throw new IOException("Invalid permissions mode provided while "
+ "trying to createPermissions", nfe);
}
permissions = modeToPosixFilePermission(mode);
}
@Override
protected void stashOriginalFilePermissions() throws IOException {
// save off permissions in case we need to
// rewrite the keystore in flush()
if (!Shell.WINDOWS) {
Path path = Paths.get(file.getCanonicalPath());
permissions = Files.getPosixFilePermissions(path);
} else {
// On Windows, the JDK does not support the POSIX file permission APIs.
// Instead, we can do a winutils call and translate.
String[] cmd = Shell.getGetPermissionCommand();
String[] args = new String[cmd.length + 1];
System.arraycopy(cmd, 0, args, 0, cmd.length);
args[cmd.length] = file.getCanonicalPath();
String out = Shell.execCommand(args);
StringTokenizer t = new StringTokenizer(out, Shell.TOKEN_SEPARATOR_REGEX);
// The winutils output consists of 10 characters because of the leading
// directory indicator, i.e. "drwx------". The JDK parsing method expects
// a 9-character string, so remove the leading character.
String permString = t.nextToken().substring(1);
permissions = PosixFilePermissions.fromString(permString);
}
}
@Override
protected void initFileSystem(URI uri, Configuration conf)
throws IOException {
super.initFileSystem(uri, conf);
try {
file = new File(new URI(getPath().toString()));
if (LOG.isDebugEnabled()) {
LOG.debug("initialized local file as '" + file + "'.");
if (file.exists()) {
LOG.debug("the local file exists and is size " + file.length());
if (LOG.isTraceEnabled()) {
if (file.canRead()) {
LOG.trace("we can read the local file.");
}
if (file.canWrite()) {
LOG.trace("we can write the local file.");
}
}
} else {
LOG.debug("the local file does not exist.");
}
}
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
@Override
public void flush() throws IOException {
super.flush();
if (LOG.isDebugEnabled()) {
LOG.debug("Reseting permissions to '" + permissions + "'");
}
if (!Shell.WINDOWS) {
Files.setPosixFilePermissions(Paths.get(file.getCanonicalPath()),
permissions);
} else {
// FsPermission expects a 10-character string because of the leading
// directory indicator, i.e. "drwx------". The JDK toString method returns
// a 9-character string, so prepend a leading character.
FsPermission fsPermission = FsPermission.valueOf(
"-" + PosixFilePermissions.toString(permissions));
FileUtil.setPermission(file, fsPermission);
}
}
/**
* The factory to create JksProviders, which is used by the ServiceLoader.
*/
public static class Factory extends CredentialProviderFactory {
@Override
public CredentialProvider createProvider(URI providerName,
Configuration conf) throws IOException {
if (SCHEME_NAME.equals(providerName.getScheme())) {
return new LocalJavaKeyStoreProvider(providerName, conf);
}
return null;
}
}
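  // Example wiring (a sketch): setting the provider path lets
  // CredentialProviderFactory resolve this factory through ServiceLoader.
  //
  //   Configuration conf = new Configuration();
  //   conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
  //       "localjceks://file/home/larry/creds.jceks");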
private static Set<PosixFilePermission> modeToPosixFilePermission(
int mode) {
Set<PosixFilePermission> perms = EnumSet.noneOf(PosixFilePermission.class);
if ((mode & 0001) != 0) {
perms.add(PosixFilePermission.OTHERS_EXECUTE);
}
if ((mode & 0002) != 0) {
perms.add(PosixFilePermission.OTHERS_WRITE);
}
if ((mode & 0004) != 0) {
perms.add(PosixFilePermission.OTHERS_READ);
}
if ((mode & 0010) != 0) {
perms.add(PosixFilePermission.GROUP_EXECUTE);
}
if ((mode & 0020) != 0) {
perms.add(PosixFilePermission.GROUP_WRITE);
}
if ((mode & 0040) != 0) {
perms.add(PosixFilePermission.GROUP_READ);
}
if ((mode & 0100) != 0) {
perms.add(PosixFilePermission.OWNER_EXECUTE);
}
if ((mode & 0200) != 0) {
perms.add(PosixFilePermission.OWNER_WRITE);
}
if ((mode & 0400) != 0) {
perms.add(PosixFilePermission.OWNER_READ);
}
return perms;
}
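  // A minimal sanity sketch (not part of the provider API): octal mode 0640
  // should yield the same permission set as the POSIX string "rw-r-----",
  // since each octal digit above is tested bit-by-bit, mirroring chmod.
  static boolean demoModeMapping() {
    Set<PosixFilePermission> fromMode = modeToPosixFilePermission(0640);
    Set<PosixFilePermission> fromString =
        PosixFilePermissions.fromString("rw-r-----");
    return fromMode.equals(fromString);
  }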
}
| 7,333 | 32.953704 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
package org.apache.hadoop.security.token;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,022 | 43.478261 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token;
import java.io.IOException;
import java.util.Arrays;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.security.UserGroupInformation;
/**
 * An identifier that identifies a token; it may contain public information
 * about a token, including its kind (or type).
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class TokenIdentifier implements Writable {
private String trackingId = null;
/**
* Get the token kind
* @return the kind of the token
*/
public abstract Text getKind();
/**
* Get the Ugi with the username encoded in the token identifier
*
   * @return the username. null is returned if the username in the identifier is
* empty or null.
*/
public abstract UserGroupInformation getUser();
/**
* Get the bytes for the token identifier
* @return the bytes of the identifier
*/
public byte[] getBytes() {
DataOutputBuffer buf = new DataOutputBuffer(4096);
try {
this.write(buf);
} catch (IOException ie) {
throw new RuntimeException("i/o error in getBytes", ie);
}
return Arrays.copyOf(buf.getData(), buf.getLength());
}
/**
* Returns a tracking identifier that can be used to associate usages of a
* token across multiple client sessions.
*
   * Currently, this function just returns an MD5 hash of {@link #getBytes()}.
*
* @return tracking identifier
*/
public String getTrackingId() {
if (trackingId == null) {
trackingId = DigestUtils.md5Hex(getBytes());
}
return trackingId;
}
}
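
// A minimal concrete identifier (a sketch; "DEMO_TOKEN" is an assumed kind,
// not a real Hadoop token type). It shows how getBytes() and getTrackingId()
// derive entirely from the fields serialized in write().
class DemoTokenIdentifier extends TokenIdentifier {
  private final Text owner = new Text("demo-user");

  @Override
  public Text getKind() {
    return new Text("DEMO_TOKEN");
  }

  @Override
  public UserGroupInformation getUser() {
    return UserGroupInformation.createRemoteUser(owner.toString());
  }

  @Override
  public void write(java.io.DataOutput out) throws IOException {
    owner.write(out);
  }

  @Override
  public void readFields(java.io.DataInput in) throws IOException {
    owner.readFields(in);
  }
}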
| 2,659 | 30.294118 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenSelector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token;
import java.util.Collection;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
/**
 * Selects a token of type T from a collection of tokens for use with the
 * named service.
*
* @param <T>
* T extends TokenIdentifier
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface TokenSelector<T extends TokenIdentifier> {
Token<T> selectToken(Text service,
Collection<Token<? extends TokenIdentifier>> tokens);
}
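
// A minimal sketch of a selector: match on the requested service name and a
// fixed kind ("DEMO_TOKEN" is an assumed kind, not a real Hadoop token type).
class DemoTokenSelector implements TokenSelector<TokenIdentifier> {
  private static final Text KIND = new Text("DEMO_TOKEN");

  @SuppressWarnings("unchecked")
  @Override
  public Token<TokenIdentifier> selectToken(Text service,
      Collection<Token<? extends TokenIdentifier>> tokens) {
    if (service == null) {
      return null;
    }
    for (Token<? extends TokenIdentifier> token : tokens) {
      // Both the kind and the service must match for the token to be usable.
      if (KIND.equals(token.getKind()) && service.equals(token.getService())) {
        return (Token<TokenIdentifier>) token;
      }
    }
    return null;
  }
}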
| 1,378 | 34.358974 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenRenewer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
/**
* This is the interface for plugins that handle tokens.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class TokenRenewer {
/**
* Does this renewer handle this kind of token?
* @param kind the kind of the token
* @return true if this renewer can renew it
*/
public abstract boolean handleKind(Text kind);
/**
* Is the given token managed? Only managed tokens may be renewed or
* cancelled.
* @param token the token being checked
* @return true if the token may be renewed or cancelled
* @throws IOException
*/
public abstract boolean isManaged(Token<?> token) throws IOException;
/**
* Renew the given token.
* @return the new expiration time
* @throws IOException
* @throws InterruptedException
*/
public abstract long renew(Token<?> token,
Configuration conf
) throws IOException, InterruptedException;
/**
* Cancel the given token
* @throws IOException
* @throws InterruptedException
*/
public abstract void cancel(Token<?> token,
Configuration conf
) throws IOException, InterruptedException;
}
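
// Concrete renewers are discovered with java.util.ServiceLoader (see
// Token#getRenewer), so an implementation is typically registered by listing
// its class name in
// META-INF/services/org.apache.hadoop.security.token.TokenRenewer.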
| 2,312 | 32.042857 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token;
import java.lang.annotation.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Indicates token-related information for a protocol, in particular the
 * {@link TokenSelector} to be used.
*/
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
@InterfaceAudience.Public
@InterfaceStability.Evolving
public @interface TokenInfo {
/** The type of TokenSelector to be used */
Class<? extends TokenSelector<? extends TokenIdentifier>> value();
}
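
// Example usage (a sketch; ClientProtocol and DemoTokenSelector are assumed
// names): annotating an RPC protocol tells the security layer which selector
// picks the right token from a user's credentials.
//
//   @TokenInfo(DemoTokenSelector.class)
//   public interface ClientProtocol { ... }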
| 1,324 | 35.805556 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token;
import java.io.IOException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import javax.crypto.KeyGenerator;
import javax.crypto.Mac;
import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException;
/**
* The server-side secret manager for each token type.
* @param <T> The type of the token identifier
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class SecretManager<T extends TokenIdentifier> {
/**
* The token was invalid and the message explains why.
*/
@SuppressWarnings("serial")
@InterfaceStability.Evolving
public static class InvalidToken extends IOException {
public InvalidToken(String msg) {
super(msg);
}
}
/**
* Create the password for the given identifier.
   * The identifier may be modified inside this method.
* @param identifier the identifier to use
* @return the new password
*/
protected abstract byte[] createPassword(T identifier);
/**
* Retrieve the password for the given token identifier. Should check the date
* or registry to make sure the token hasn't expired or been revoked. Returns
* the relevant password.
* @param identifier the identifier to validate
* @return the password to use
* @throws InvalidToken the token was invalid
*/
public abstract byte[] retrievePassword(T identifier)
throws InvalidToken;
/**
   * The same functionality as {@link #retrievePassword}, except that this
* method can throw a {@link RetriableException} or a {@link StandbyException}
* to indicate that client can retry/failover the same operation because of
* temporary issue on the server side.
*
* @param identifier the identifier to validate
* @return the password to use
* @throws InvalidToken the token was invalid
* @throws StandbyException the server is in standby state, the client can
* try other servers
* @throws RetriableException the token was invalid, and the server thinks
* this may be a temporary issue and suggests the client to retry
* @throws IOException to allow future exceptions to be added without breaking
* compatibility
*/
public byte[] retriableRetrievePassword(T identifier)
throws InvalidToken, StandbyException, RetriableException, IOException {
return retrievePassword(identifier);
}
/**
* Create an empty token identifier.
* @return the newly created empty token identifier
*/
public abstract T createIdentifier();
/**
* No-op if the secret manager is available for reading tokens, throw a
* StandbyException otherwise.
*
* @throws StandbyException if the secret manager is not available to read
* tokens
*/
public void checkAvailableForRead() throws StandbyException {
// Default to being available for read.
}
/**
* The name of the hashing algorithm.
*/
private static final String DEFAULT_HMAC_ALGORITHM = "HmacSHA1";
/**
* The length of the random keys to use.
*/
private static final int KEY_LENGTH = 64;
/**
* A thread local store for the Macs.
*/
private static final ThreadLocal<Mac> threadLocalMac =
new ThreadLocal<Mac>(){
@Override
protected Mac initialValue() {
try {
return Mac.getInstance(DEFAULT_HMAC_ALGORITHM);
} catch (NoSuchAlgorithmException nsa) {
throw new IllegalArgumentException("Can't find " + DEFAULT_HMAC_ALGORITHM +
" algorithm.");
}
}
};
/**
* Key generator to use.
*/
private final KeyGenerator keyGen;
{
try {
keyGen = KeyGenerator.getInstance(DEFAULT_HMAC_ALGORITHM);
keyGen.init(KEY_LENGTH);
} catch (NoSuchAlgorithmException nsa) {
throw new IllegalArgumentException("Can't find " + DEFAULT_HMAC_ALGORITHM +
" algorithm.");
}
}
/**
* Generate a new random secret key.
* @return the new key
*/
protected SecretKey generateSecret() {
SecretKey key;
synchronized (keyGen) {
key = keyGen.generateKey();
}
return key;
}
/**
* Compute HMAC of the identifier using the secret key and return the
   * output as the password
* @param identifier the bytes of the identifier
* @param key the secret key
* @return the bytes of the generated password
*/
protected static byte[] createPassword(byte[] identifier,
SecretKey key) {
Mac mac = threadLocalMac.get();
try {
mac.init(key);
} catch (InvalidKeyException ike) {
throw new IllegalArgumentException("Invalid key to HMAC computation",
ike);
}
return mac.doFinal(identifier);
}
/**
* Convert the byte[] to a secret key
* @param key the byte[] to create a secret key from
* @return the secret key
*/
protected static SecretKey createSecretKey(byte[] key) {
return new SecretKeySpec(key, DEFAULT_HMAC_ALGORITHM);
}
}
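
// A minimal sketch of a concrete manager (not part of Hadoop): a single
// in-memory key HMACs every identifier, and retrievePassword simply
// recomputes the HMAC. Real managers also roll keys and check expiry dates.
// "DEMO_TOKEN" and "demo-user" are assumed, illustrative values.
class DemoIdentifier extends TokenIdentifier {
  private final org.apache.hadoop.io.Text owner =
      new org.apache.hadoop.io.Text("demo-user");

  @Override
  public org.apache.hadoop.io.Text getKind() {
    return new org.apache.hadoop.io.Text("DEMO_TOKEN");
  }

  @Override
  public org.apache.hadoop.security.UserGroupInformation getUser() {
    return org.apache.hadoop.security.UserGroupInformation
        .createRemoteUser(owner.toString());
  }

  @Override
  public void write(java.io.DataOutput out) throws IOException {
    owner.write(out);
  }

  @Override
  public void readFields(java.io.DataInput in) throws IOException {
    owner.readFields(in);
  }
}

class DemoSecretManager extends SecretManager<DemoIdentifier> {
  private final SecretKey key = generateSecret();

  @Override
  protected byte[] createPassword(DemoIdentifier id) {
    return createPassword(id.getBytes(), key);
  }

  @Override
  public byte[] retrievePassword(DemoIdentifier id) throws InvalidToken {
    // A real implementation would verify expiry/revocation here.
    return createPassword(id.getBytes(), key);
  }

  @Override
  public DemoIdentifier createIdentifier() {
    return new DemoIdentifier();
  }
}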
| 6,099 | 30.937173 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token;
import com.google.common.collect.Maps;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.*;
import org.apache.hadoop.util.ReflectionUtils;
import java.io.*;
import java.util.Arrays;
import java.util.Map;
import java.util.ServiceLoader;
/**
* The client-side form of the token.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Token<T extends TokenIdentifier> implements Writable {
public static final Log LOG = LogFactory.getLog(Token.class);
private static Map<Text, Class<? extends TokenIdentifier>> tokenKindMap;
private byte[] identifier;
private byte[] password;
private Text kind;
private Text service;
private TokenRenewer renewer;
/**
* Construct a token given a token identifier and a secret manager for the
* type of the token identifier.
* @param id the token identifier
* @param mgr the secret manager
*/
public Token(T id, SecretManager<T> mgr) {
password = mgr.createPassword(id);
identifier = id.getBytes();
kind = id.getKind();
service = new Text();
}
/**
* Construct a token from the components.
* @param identifier the token identifier
* @param password the token's password
* @param kind the kind of token
* @param service the service for this token
*/
public Token(byte[] identifier, byte[] password, Text kind, Text service) {
this.identifier = (identifier == null)? new byte[0] : identifier;
this.password = (password == null)? new byte[0] : password;
this.kind = (kind == null)? new Text() : kind;
this.service = (service == null)? new Text() : service;
}
/**
* Default constructor
*/
public Token() {
identifier = new byte[0];
password = new byte[0];
kind = new Text();
service = new Text();
}
/**
* Clone a token.
* @param other the token to clone
*/
public Token(Token<T> other) {
this.identifier = other.identifier;
this.password = other.password;
this.kind = other.kind;
this.service = other.service;
}
/**
* Get the token identifier's byte representation
* @return the token identifier's byte representation
*/
public byte[] getIdentifier() {
return identifier;
}
private static Class<? extends TokenIdentifier>
getClassForIdentifier(Text kind) {
Class<? extends TokenIdentifier> cls = null;
synchronized (Token.class) {
if (tokenKindMap == null) {
tokenKindMap = Maps.newHashMap();
for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) {
tokenKindMap.put(id.getKind(), id.getClass());
}
}
cls = tokenKindMap.get(kind);
}
if (cls == null) {
LOG.warn("Cannot find class for token kind " + kind);
return null;
}
return cls;
}
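  // Identifier classes are looked up via java.util.ServiceLoader, so a kind
  // resolves only when its TokenIdentifier implementation is listed in
  // META-INF/services/org.apache.hadoop.security.token.TokenIdentifier.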
/**
* Get the token identifier object, or null if it could not be constructed
* (because the class could not be loaded, for example).
* @return the token identifier, or null
* @throws IOException
*/
@SuppressWarnings("unchecked")
public T decodeIdentifier() throws IOException {
Class<? extends TokenIdentifier> cls = getClassForIdentifier(getKind());
if (cls == null) {
return null;
}
TokenIdentifier tokenIdentifier = ReflectionUtils.newInstance(cls, null);
ByteArrayInputStream buf = new ByteArrayInputStream(identifier);
DataInputStream in = new DataInputStream(buf);
tokenIdentifier.readFields(in);
in.close();
return (T) tokenIdentifier;
}
/**
* Get the token password/secret
* @return the token password/secret
*/
public byte[] getPassword() {
return password;
}
/**
* Get the token kind
* @return the kind of the token
*/
public synchronized Text getKind() {
return kind;
}
/**
* Set the token kind. This is only intended to be used by services that
* wrap another service's token, such as HFTP wrapping HDFS.
   * @param newKind the new kind of the token
*/
@InterfaceAudience.Private
public synchronized void setKind(Text newKind) {
kind = newKind;
renewer = null;
}
/**
* Get the service on which the token is supposed to be used
* @return the service name
*/
public Text getService() {
return service;
}
/**
* Set the service on which the token is supposed to be used
* @param newService the service name
*/
public void setService(Text newService) {
service = newService;
}
/**
* Indicates whether the token is a clone. Used by HA failover proxy
   * to indicate that a token should not be visible to the user via
* UGI.getCredentials()
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static class PrivateToken<T extends TokenIdentifier> extends Token<T> {
public PrivateToken(Token<T> token) {
super(token);
}
}
@Override
public void readFields(DataInput in) throws IOException {
int len = WritableUtils.readVInt(in);
if (identifier == null || identifier.length != len) {
identifier = new byte[len];
}
in.readFully(identifier);
len = WritableUtils.readVInt(in);
if (password == null || password.length != len) {
password = new byte[len];
}
in.readFully(password);
kind.readFields(in);
service.readFields(in);
}
@Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, identifier.length);
out.write(identifier);
WritableUtils.writeVInt(out, password.length);
out.write(password);
kind.write(out);
service.write(out);
}
/**
* Generate a string with the url-quoted base64 encoded serialized form
* of the Writable.
* @param obj the object to serialize
* @return the encoded string
* @throws IOException
*/
private static String encodeWritable(Writable obj) throws IOException {
DataOutputBuffer buf = new DataOutputBuffer();
obj.write(buf);
Base64 encoder = new Base64(0, null, true);
byte[] raw = new byte[buf.getLength()];
System.arraycopy(buf.getData(), 0, raw, 0, buf.getLength());
return encoder.encodeToString(raw);
}
/**
* Modify the writable to the value from the newValue
* @param obj the object to read into
* @param newValue the string with the url-safe base64 encoded bytes
* @throws IOException
*/
private static void decodeWritable(Writable obj,
String newValue) throws IOException {
Base64 decoder = new Base64(0, null, true);
DataInputBuffer buf = new DataInputBuffer();
byte[] decoded = decoder.decode(newValue);
buf.reset(decoded, decoded.length);
obj.readFields(buf);
}
/**
* Encode this token as a url safe string
* @return the encoded string
* @throws IOException
*/
public String encodeToUrlString() throws IOException {
return encodeWritable(this);
}
/**
* Decode the given url safe string into this token.
* @param newValue the encoded string
* @throws IOException
*/
public void decodeFromUrlString(String newValue) throws IOException {
decodeWritable(this, newValue);
}
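  // A minimal round-trip sketch (illustrative, not part of the public API):
  // the URL-safe base64 form keeps tokens printable, which is why they can be
  // carried in HTTP headers or query strings and decoded back losslessly.
  static boolean demoUrlRoundTrip() throws IOException {
    Token<TokenIdentifier> original = new Token<TokenIdentifier>(
        "demo-id".getBytes(), "demo-pw".getBytes(),
        new Text("DEMO_TOKEN"), new Text("nn.example.com:8020"));
    Token<TokenIdentifier> copy = new Token<TokenIdentifier>();
    copy.decodeFromUrlString(original.encodeToUrlString());
    return original.equals(copy); // true: all four fields survive the trip
  }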
@SuppressWarnings("unchecked")
@Override
public boolean equals(Object right) {
if (this == right) {
return true;
} else if (right == null || getClass() != right.getClass()) {
return false;
} else {
Token<T> r = (Token<T>) right;
return Arrays.equals(identifier, r.identifier) &&
Arrays.equals(password, r.password) &&
kind.equals(r.kind) &&
service.equals(r.service);
}
}
@Override
public int hashCode() {
return WritableComparator.hashBytes(identifier, identifier.length);
}
private static void addBinaryBuffer(StringBuilder buffer, byte[] bytes) {
for (int idx = 0; idx < bytes.length; idx++) {
// if not the first, put a blank separator in
if (idx != 0) {
buffer.append(' ');
}
String num = Integer.toHexString(0xff & bytes[idx]);
// if it is only one digit, add a leading 0.
if (num.length() < 2) {
buffer.append('0');
}
buffer.append(num);
}
}
private void identifierToString(StringBuilder buffer) {
T id = null;
try {
id = decodeIdentifier();
} catch (IOException e) {
// handle in the finally block
} finally {
if (id != null) {
buffer.append("(").append(id).append(")");
} else {
addBinaryBuffer(buffer, identifier);
}
}
}
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
buffer.append("Kind: ");
buffer.append(kind.toString());
buffer.append(", Service: ");
buffer.append(service.toString());
buffer.append(", Ident: ");
identifierToString(buffer);
return buffer.toString();
}
private static ServiceLoader<TokenRenewer> renewers =
ServiceLoader.load(TokenRenewer.class);
private synchronized TokenRenewer getRenewer() throws IOException {
if (renewer != null) {
return renewer;
}
renewer = TRIVIAL_RENEWER;
synchronized (renewers) {
      for (TokenRenewer candidate : renewers) {
        if (candidate.handleKind(this.kind)) {
          renewer = candidate;
return renewer;
}
}
}
LOG.warn("No TokenRenewer defined for token kind " + this.kind);
return renewer;
}
/**
* Is this token managed so that it can be renewed or cancelled?
* @return true, if it can be renewed and cancelled.
*/
public boolean isManaged() throws IOException {
return getRenewer().isManaged(this);
}
/**
* Renew this delegation token
* @return the new expiration time
* @throws IOException
* @throws InterruptedException
*/
public long renew(Configuration conf
) throws IOException, InterruptedException {
return getRenewer().renew(this, conf);
}
/**
* Cancel this delegation token
* @throws IOException
* @throws InterruptedException
*/
public void cancel(Configuration conf
) throws IOException, InterruptedException {
getRenewer().cancel(this, conf);
}
/**
* A trivial renewer for token kinds that aren't managed. Sub-classes need
* to implement getKind for their token kind.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public static class TrivialRenewer extends TokenRenewer {
// define the kind for this renewer
protected Text getKind() {
return null;
}
@Override
public boolean handleKind(Text kind) {
return kind.equals(getKind());
}
@Override
public boolean isManaged(Token<?> token) {
return false;
}
@Override
public long renew(Token<?> token, Configuration conf) {
throw new UnsupportedOperationException("Token renewal is not supported "+
" for " + token.kind + " tokens");
}
@Override
public void cancel(Token<?> token, Configuration conf) throws IOException,
InterruptedException {
throw new UnsupportedOperationException("Token cancel is not supported " +
" for " + token.kind + " tokens");
}
}
private static final TokenRenewer TRIVIAL_RENEWER = new TrivialRenewer();
}
| 12,431 | 27.979021 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
@InterfaceStability.Evolving
package org.apache.hadoop.security.token.delegation;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
| 1,073 | 45.695652 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import javax.security.auth.login.AppConfigurationEntry;
import org.apache.curator.ensemble.fixed.FixedEnsembleProvider;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.CuratorFrameworkFactory.Builder;
import org.apache.curator.framework.api.ACLProvider;
import org.apache.curator.framework.imps.DefaultACLProvider;
import org.apache.curator.framework.recipes.cache.ChildData;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCache.StartMode;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;
import org.apache.curator.framework.recipes.shared.SharedCount;
import org.apache.curator.framework.recipes.shared.VersionedValue;
import org.apache.curator.retry.RetryNTimes;
import org.apache.curator.utils.EnsurePath;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.ZooDefs.Perms;
import org.apache.zookeeper.client.ZooKeeperSaslClient;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* An implementation of {@link AbstractDelegationTokenSecretManager} that
* persists TokenIdentifiers and DelegationKeys in Zookeeper. This class can
 * be used by HA (highly available) services that consist of multiple nodes.
* This class ensures that Identifiers and Keys are replicated to all nodes of
* the service.
*/
@InterfaceAudience.Private
public abstract class ZKDelegationTokenSecretManager<TokenIdent extends AbstractDelegationTokenIdentifier>
extends AbstractDelegationTokenSecretManager<TokenIdent> {
private static final String ZK_CONF_PREFIX = "zk-dt-secret-manager.";
public static final String ZK_DTSM_ZK_NUM_RETRIES = ZK_CONF_PREFIX
+ "zkNumRetries";
public static final String ZK_DTSM_ZK_SESSION_TIMEOUT = ZK_CONF_PREFIX
+ "zkSessionTimeout";
public static final String ZK_DTSM_ZK_CONNECTION_TIMEOUT = ZK_CONF_PREFIX
+ "zkConnectionTimeout";
public static final String ZK_DTSM_ZK_SHUTDOWN_TIMEOUT = ZK_CONF_PREFIX
+ "zkShutdownTimeout";
public static final String ZK_DTSM_ZNODE_WORKING_PATH = ZK_CONF_PREFIX
+ "znodeWorkingPath";
public static final String ZK_DTSM_ZK_AUTH_TYPE = ZK_CONF_PREFIX
+ "zkAuthType";
public static final String ZK_DTSM_ZK_CONNECTION_STRING = ZK_CONF_PREFIX
+ "zkConnectionString";
public static final String ZK_DTSM_ZK_KERBEROS_KEYTAB = ZK_CONF_PREFIX
+ "kerberos.keytab";
public static final String ZK_DTSM_ZK_KERBEROS_PRINCIPAL = ZK_CONF_PREFIX
+ "kerberos.principal";
public static final int ZK_DTSM_ZK_NUM_RETRIES_DEFAULT = 3;
public static final int ZK_DTSM_ZK_SESSION_TIMEOUT_DEFAULT = 10000;
public static final int ZK_DTSM_ZK_CONNECTION_TIMEOUT_DEFAULT = 10000;
public static final int ZK_DTSM_ZK_SHUTDOWN_TIMEOUT_DEFAULT = 10000;
public static final String ZK_DTSM_ZNODE_WORKING_PATH_DEAFULT = "zkdtsm";
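  // Example configuration (a sketch; all values are illustrative):
  //   zk-dt-secret-manager.zkConnectionString = zk1:2181,zk2:2181,zk3:2181
  //   zk-dt-secret-manager.zkAuthType         = sasl
  //   zk-dt-secret-manager.kerberos.keytab    = /etc/security/dtsm.keytab
  //   zk-dt-secret-manager.kerberos.principal = dtsm/host.example.com@REALM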
private static Logger LOG = LoggerFactory
.getLogger(ZKDelegationTokenSecretManager.class);
private static final String JAAS_LOGIN_ENTRY_NAME =
"ZKDelegationTokenSecretManagerClient";
private static final String ZK_DTSM_NAMESPACE = "ZKDTSMRoot";
private static final String ZK_DTSM_SEQNUM_ROOT = "/ZKDTSMSeqNumRoot";
private static final String ZK_DTSM_KEYID_ROOT = "/ZKDTSMKeyIdRoot";
private static final String ZK_DTSM_TOKENS_ROOT = "/ZKDTSMTokensRoot";
private static final String ZK_DTSM_MASTER_KEY_ROOT = "/ZKDTSMMasterKeyRoot";
private static final String DELEGATION_KEY_PREFIX = "DK_";
private static final String DELEGATION_TOKEN_PREFIX = "DT_";
private static final ThreadLocal<CuratorFramework> CURATOR_TL =
new ThreadLocal<CuratorFramework>();
public static void setCurator(CuratorFramework curator) {
CURATOR_TL.set(curator);
}
private final boolean isExternalClient;
private final CuratorFramework zkClient;
private SharedCount delTokSeqCounter;
private SharedCount keyIdSeqCounter;
private PathChildrenCache keyCache;
private PathChildrenCache tokenCache;
private ExecutorService listenerThreadPool;
private final long shutdownTimeout;
public ZKDelegationTokenSecretManager(Configuration conf) {
super(conf.getLong(DelegationTokenManager.UPDATE_INTERVAL,
DelegationTokenManager.UPDATE_INTERVAL_DEFAULT) * 1000,
conf.getLong(DelegationTokenManager.MAX_LIFETIME,
DelegationTokenManager.MAX_LIFETIME_DEFAULT) * 1000,
        conf.getLong(DelegationTokenManager.RENEW_INTERVAL,
            DelegationTokenManager.RENEW_INTERVAL_DEFAULT) * 1000,
conf.getLong(DelegationTokenManager.REMOVAL_SCAN_INTERVAL,
DelegationTokenManager.REMOVAL_SCAN_INTERVAL_DEFAULT) * 1000);
shutdownTimeout = conf.getLong(ZK_DTSM_ZK_SHUTDOWN_TIMEOUT,
ZK_DTSM_ZK_SHUTDOWN_TIMEOUT_DEFAULT);
if (CURATOR_TL.get() != null) {
zkClient =
CURATOR_TL.get().usingNamespace(
conf.get(ZK_DTSM_ZNODE_WORKING_PATH,
ZK_DTSM_ZNODE_WORKING_PATH_DEAFULT)
+ "/" + ZK_DTSM_NAMESPACE);
isExternalClient = true;
} else {
String connString = conf.get(ZK_DTSM_ZK_CONNECTION_STRING);
Preconditions.checkNotNull(connString,
"Zookeeper connection string cannot be null");
String authType = conf.get(ZK_DTSM_ZK_AUTH_TYPE);
// AuthType has to be explicitly set to 'none' or 'sasl'
Preconditions.checkNotNull(authType, "Zookeeper authType cannot be null !!");
Preconditions.checkArgument(
authType.equals("sasl") || authType.equals("none"),
"Zookeeper authType must be one of [none, sasl]");
Builder builder = null;
try {
ACLProvider aclProvider = null;
if (authType.equals("sasl")) {
LOG.info("Connecting to ZooKeeper with SASL/Kerberos"
+ "and using 'sasl' ACLs");
String principal = setJaasConfiguration(conf);
System.setProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY,
JAAS_LOGIN_ENTRY_NAME);
System.setProperty("zookeeper.authProvider.1",
"org.apache.zookeeper.server.auth.SASLAuthenticationProvider");
aclProvider = new SASLOwnerACLProvider(principal);
} else { // "none"
LOG.info("Connecting to ZooKeeper without authentication");
aclProvider = new DefaultACLProvider(); // open to everyone
}
int sessionT =
conf.getInt(ZK_DTSM_ZK_SESSION_TIMEOUT,
ZK_DTSM_ZK_SESSION_TIMEOUT_DEFAULT);
int numRetries =
conf.getInt(ZK_DTSM_ZK_NUM_RETRIES, ZK_DTSM_ZK_NUM_RETRIES_DEFAULT);
builder =
CuratorFrameworkFactory
.builder()
.aclProvider(aclProvider)
.namespace(
conf.get(ZK_DTSM_ZNODE_WORKING_PATH,
ZK_DTSM_ZNODE_WORKING_PATH_DEAFULT)
+ "/"
+ ZK_DTSM_NAMESPACE
)
.sessionTimeoutMs(sessionT)
.connectionTimeoutMs(
conf.getInt(ZK_DTSM_ZK_CONNECTION_TIMEOUT,
ZK_DTSM_ZK_CONNECTION_TIMEOUT_DEFAULT)
)
.retryPolicy(
new RetryNTimes(numRetries, sessionT / numRetries));
} catch (Exception ex) {
throw new RuntimeException("Could not Load ZK acls or auth");
}
zkClient = builder.ensembleProvider(new FixedEnsembleProvider(connString))
.build();
isExternalClient = false;
}
}
private String setJaasConfiguration(Configuration config) throws Exception {
String keytabFile =
config.get(ZK_DTSM_ZK_KERBEROS_KEYTAB, "").trim();
if (keytabFile == null || keytabFile.length() == 0) {
throw new IllegalArgumentException(ZK_DTSM_ZK_KERBEROS_KEYTAB
+ " must be specified");
}
String principal =
config.get(ZK_DTSM_ZK_KERBEROS_PRINCIPAL, "").trim();
if (principal == null || principal.length() == 0) {
throw new IllegalArgumentException(ZK_DTSM_ZK_KERBEROS_PRINCIPAL
+ " must be specified");
}
JaasConfiguration jConf =
new JaasConfiguration(JAAS_LOGIN_ENTRY_NAME, principal, keytabFile);
javax.security.auth.login.Configuration.setConfiguration(jConf);
return principal.split("[/@]")[0];
}
/**
* Creates a programmatic version of a jaas.conf file. This can be used
* instead of writing a jaas.conf file and setting the system property,
* "java.security.auth.login.config", to point to that file. It is meant to be
* used for connecting to ZooKeeper.
*/
@InterfaceAudience.Private
public static class JaasConfiguration extends
javax.security.auth.login.Configuration {
private static AppConfigurationEntry[] entry;
private String entryName;
/**
* Add an entry to the jaas configuration with the passed in name,
* principal, and keytab. The other necessary options will be set for you.
*
* @param entryName
* The name of the entry (e.g. "Client")
* @param principal
* The principal of the user
* @param keytab
* The location of the keytab
*/
public JaasConfiguration(String entryName, String principal, String keytab) {
this.entryName = entryName;
Map<String, String> options = new HashMap<String, String>();
options.put("keyTab", keytab);
options.put("principal", principal);
options.put("useKeyTab", "true");
options.put("storeKey", "true");
options.put("useTicketCache", "false");
options.put("refreshKrb5Config", "true");
String jaasEnvVar = System.getenv("HADOOP_JAAS_DEBUG");
if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar)) {
options.put("debug", "true");
}
entry = new AppConfigurationEntry[] {
new AppConfigurationEntry(getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options) };
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
return (entryName.equals(name)) ? entry : null;
}
private String getKrb5LoginModuleName() {
String krb5LoginModuleName;
if (System.getProperty("java.vendor").contains("IBM")) {
krb5LoginModuleName = "com.ibm.security.auth.module.Krb5LoginModule";
} else {
krb5LoginModuleName = "com.sun.security.auth.module.Krb5LoginModule";
}
return krb5LoginModuleName;
}
}
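  // Example (a sketch; "Client" is the conventional ZooKeeper client login
  // context and is an assumed entry name here): install the programmatic JAAS
  // entry instead of pointing java.security.auth.login.config at a file.
  //
  //   javax.security.auth.login.Configuration.setConfiguration(
  //       new JaasConfiguration("Client", "dtsm/host.example.com@REALM",
  //           "/etc/security/dtsm.keytab"));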
@Override
public void startThreads() throws IOException {
if (!isExternalClient) {
try {
zkClient.start();
} catch (Exception e) {
throw new IOException("Could not start Curator Framework", e);
}
} else {
// If namespace parents are implicitly created, they won't have ACLs.
// So, let's explicitly create them.
CuratorFramework nullNsFw = zkClient.usingNamespace(null);
EnsurePath ensureNs =
nullNsFw.newNamespaceAwareEnsurePath("/" + zkClient.getNamespace());
try {
ensureNs.ensure(nullNsFw.getZookeeperClient());
} catch (Exception e) {
throw new IOException("Could not create namespace", e);
}
}
listenerThreadPool = Executors.newSingleThreadExecutor();
try {
delTokSeqCounter = new SharedCount(zkClient, ZK_DTSM_SEQNUM_ROOT, 0);
if (delTokSeqCounter != null) {
delTokSeqCounter.start();
}
} catch (Exception e) {
throw new IOException("Could not start Sequence Counter", e);
}
try {
keyIdSeqCounter = new SharedCount(zkClient, ZK_DTSM_KEYID_ROOT, 0);
if (keyIdSeqCounter != null) {
keyIdSeqCounter.start();
}
} catch (Exception e) {
throw new IOException("Could not start KeyId Counter", e);
}
try {
createPersistentNode(ZK_DTSM_MASTER_KEY_ROOT);
createPersistentNode(ZK_DTSM_TOKENS_ROOT);
} catch (Exception e) {
throw new RuntimeException("Could not create ZK paths");
}
try {
keyCache = new PathChildrenCache(zkClient, ZK_DTSM_MASTER_KEY_ROOT, true);
if (keyCache != null) {
keyCache.start(StartMode.BUILD_INITIAL_CACHE);
keyCache.getListenable().addListener(new PathChildrenCacheListener() {
@Override
public void childEvent(CuratorFramework client,
PathChildrenCacheEvent event)
throws Exception {
switch (event.getType()) {
case CHILD_ADDED:
processKeyAddOrUpdate(event.getData().getData());
break;
case CHILD_UPDATED:
processKeyAddOrUpdate(event.getData().getData());
break;
case CHILD_REMOVED:
processKeyRemoved(event.getData().getPath());
break;
default:
break;
}
}
}, listenerThreadPool);
}
} catch (Exception e) {
throw new IOException("Could not start PathChildrenCache for keys", e);
}
try {
tokenCache = new PathChildrenCache(zkClient, ZK_DTSM_TOKENS_ROOT, true);
if (tokenCache != null) {
tokenCache.start(StartMode.BUILD_INITIAL_CACHE);
tokenCache.getListenable().addListener(new PathChildrenCacheListener() {
@Override
public void childEvent(CuratorFramework client,
PathChildrenCacheEvent event) throws Exception {
switch (event.getType()) {
case CHILD_ADDED:
processTokenAddOrUpdate(event.getData());
break;
case CHILD_UPDATED:
processTokenAddOrUpdate(event.getData());
break;
case CHILD_REMOVED:
processTokenRemoved(event.getData());
break;
default:
break;
}
}
}, listenerThreadPool);
}
} catch (Exception e) {
throw new IOException("Could not start PathChildrenCache for tokens", e);
}
super.startThreads();
}
private void processKeyAddOrUpdate(byte[] data) throws IOException {
ByteArrayInputStream bin = new ByteArrayInputStream(data);
DataInputStream din = new DataInputStream(bin);
DelegationKey key = new DelegationKey();
key.readFields(din);
synchronized (this) {
allKeys.put(key.getKeyId(), key);
}
}
private void processKeyRemoved(String path) {
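    // Key znodes are named "DK_<keyId>" (e.g. /ZKDTSMMasterKeyRoot/DK_42), so
    // take the segment after the last '/' and parse the digits after '_'.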
int i = path.lastIndexOf('/');
if (i > 0) {
String tokSeg = path.substring(i + 1);
int j = tokSeg.indexOf('_');
if (j > 0) {
int keyId = Integer.parseInt(tokSeg.substring(j + 1));
synchronized (this) {
allKeys.remove(keyId);
}
}
}
}
private void processTokenAddOrUpdate(ChildData data) throws IOException {
ByteArrayInputStream bin = new ByteArrayInputStream(data.getData());
DataInputStream din = new DataInputStream(bin);
TokenIdent ident = createIdentifier();
ident.readFields(din);
long renewDate = din.readLong();
int pwdLen = din.readInt();
byte[] password = new byte[pwdLen];
int numRead = din.read(password, 0, pwdLen);
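    // Note: read() may return fewer than pwdLen bytes in general; for an
    // in-memory ByteArrayInputStream a single call suffices, though
    // readFully would be the stricter choice.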
if (numRead > -1) {
DelegationTokenInformation tokenInfo =
new DelegationTokenInformation(renewDate, password);
synchronized (this) {
currentTokens.put(ident, tokenInfo);
// The cancel task might be waiting
notifyAll();
}
}
}
private void processTokenRemoved(ChildData data) throws IOException {
ByteArrayInputStream bin = new ByteArrayInputStream(data.getData());
DataInputStream din = new DataInputStream(bin);
TokenIdent ident = createIdentifier();
ident.readFields(din);
synchronized (this) {
currentTokens.remove(ident);
// The cancel task might be waiting
notifyAll();
}
}
@Override
public void stopThreads() {
super.stopThreads();
try {
if (tokenCache != null) {
tokenCache.close();
}
} catch (Exception e) {
LOG.error("Could not stop Delegation Token Cache", e);
}
try {
if (delTokSeqCounter != null) {
delTokSeqCounter.close();
}
} catch (Exception e) {
LOG.error("Could not stop Delegation Token Counter", e);
}
try {
if (keyIdSeqCounter != null) {
keyIdSeqCounter.close();
}
} catch (Exception e) {
LOG.error("Could not stop Key Id Counter", e);
}
try {
if (keyCache != null) {
keyCache.close();
}
} catch (Exception e) {
LOG.error("Could not stop KeyCache", e);
}
try {
if (!isExternalClient && (zkClient != null)) {
zkClient.close();
}
} catch (Exception e) {
LOG.error("Could not stop Curator Framework", e);
}
if (listenerThreadPool != null) {
listenerThreadPool.shutdown();
try {
// wait for existing tasks to terminate
if (!listenerThreadPool.awaitTermination(shutdownTimeout,
TimeUnit.MILLISECONDS)) {
LOG.error("Forcing Listener threadPool to shutdown !!");
listenerThreadPool.shutdownNow();
}
} catch (InterruptedException ie) {
listenerThreadPool.shutdownNow();
Thread.currentThread().interrupt();
}
}
}
private void createPersistentNode(String nodePath) throws Exception {
try {
zkClient.create().withMode(CreateMode.PERSISTENT).forPath(nodePath);
} catch (KeeperException.NodeExistsException ne) {
LOG.debug(nodePath + " znode already exists !!");
} catch (Exception e) {
throw new IOException(nodePath + " znode could not be created !!", e);
}
}
@Override
protected int getDelegationTokenSeqNum() {
return delTokSeqCounter.getCount();
}
private void incrSharedCount(SharedCount sharedCount) throws Exception {
while (true) {
// Loop until we successfully increment the counter
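      // trySetCount is a compare-and-set against the znode version, so
      // concurrent incrementers conflict cleanly and retry instead of
      // losing updates.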
VersionedValue<Integer> versionedValue = sharedCount.getVersionedValue();
if (sharedCount.trySetCount(versionedValue, versionedValue.getValue() + 1)) {
break;
}
}
}
@Override
protected int incrementDelegationTokenSeqNum() {
try {
incrSharedCount(delTokSeqCounter);
} catch (InterruptedException e) {
      // The ExpirationThread is just finishing, so don't do anything.
LOG.debug("Thread interrupted while performing token counter increment", e);
Thread.currentThread().interrupt();
} catch (Exception e) {
throw new RuntimeException("Could not increment shared counter !!", e);
}
return delTokSeqCounter.getCount();
}
@Override
protected void setDelegationTokenSeqNum(int seqNum) {
try {
delTokSeqCounter.setCount(seqNum);
} catch (Exception e) {
throw new RuntimeException("Could not set shared counter !!", e);
}
}
@Override
protected int getCurrentKeyId() {
return keyIdSeqCounter.getCount();
}
@Override
protected int incrementCurrentKeyId() {
try {
incrSharedCount(keyIdSeqCounter);
} catch (InterruptedException e) {
      // The ExpirationThread is just finishing, so don't do anything.
LOG.debug("Thread interrupted while performing keyId increment", e);
Thread.currentThread().interrupt();
} catch (Exception e) {
throw new RuntimeException("Could not increment shared keyId counter !!", e);
}
return keyIdSeqCounter.getCount();
}
@Override
protected DelegationKey getDelegationKey(int keyId) {
    // First check whether I already have this key
DelegationKey key = allKeys.get(keyId);
// Then query ZK
if (key == null) {
try {
key = getKeyFromZK(keyId);
if (key != null) {
allKeys.put(keyId, key);
}
} catch (IOException e) {
LOG.error("Error retrieving key [" + keyId + "] from ZK", e);
}
}
return key;
}
private DelegationKey getKeyFromZK(int keyId) throws IOException {
String nodePath =
getNodePath(ZK_DTSM_MASTER_KEY_ROOT, DELEGATION_KEY_PREFIX + keyId);
try {
byte[] data = zkClient.getData().forPath(nodePath);
if ((data == null) || (data.length == 0)) {
return null;
}
ByteArrayInputStream bin = new ByteArrayInputStream(data);
DataInputStream din = new DataInputStream(bin);
DelegationKey key = new DelegationKey();
key.readFields(din);
return key;
} catch (KeeperException.NoNodeException e) {
LOG.error("No node in path [" + nodePath + "]");
} catch (Exception ex) {
throw new IOException(ex);
}
return null;
}
@Override
protected DelegationTokenInformation getTokenInfo(TokenIdent ident) {
    // First check if I already have this token
DelegationTokenInformation tokenInfo = currentTokens.get(ident);
// Then query ZK
if (tokenInfo == null) {
try {
tokenInfo = getTokenInfoFromZK(ident);
if (tokenInfo != null) {
currentTokens.put(ident, tokenInfo);
}
} catch (IOException e) {
LOG.error("Error retrieving tokenInfo [" + ident.getSequenceNumber()
+ "] from ZK", e);
}
}
return tokenInfo;
}
private DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident)
throws IOException {
return getTokenInfoFromZK(ident, false);
}
private DelegationTokenInformation getTokenInfoFromZK(TokenIdent ident,
boolean quiet) throws IOException {
String nodePath =
getNodePath(ZK_DTSM_TOKENS_ROOT,
DELEGATION_TOKEN_PREFIX + ident.getSequenceNumber());
try {
byte[] data = zkClient.getData().forPath(nodePath);
if ((data == null) || (data.length == 0)) {
return null;
}
ByteArrayInputStream bin = new ByteArrayInputStream(data);
DataInputStream din = new DataInputStream(bin);
createIdentifier().readFields(din);
long renewDate = din.readLong();
int pwdLen = din.readInt();
byte[] password = new byte[pwdLen];
int numRead = din.read(password, 0, pwdLen);
if (numRead > -1) {
DelegationTokenInformation tokenInfo =
new DelegationTokenInformation(renewDate, password);
return tokenInfo;
}
} catch (KeeperException.NoNodeException e) {
if (!quiet) {
LOG.error("No node in path [" + nodePath + "]");
}
} catch (Exception ex) {
throw new IOException(ex);
}
return null;
}
@Override
protected void storeDelegationKey(DelegationKey key) throws IOException {
addOrUpdateDelegationKey(key, false);
}
@Override
protected void updateDelegationKey(DelegationKey key) throws IOException {
addOrUpdateDelegationKey(key, true);
}
private void addOrUpdateDelegationKey(DelegationKey key, boolean isUpdate)
throws IOException {
String nodeCreatePath =
getNodePath(ZK_DTSM_MASTER_KEY_ROOT,
DELEGATION_KEY_PREFIX + key.getKeyId());
ByteArrayOutputStream os = new ByteArrayOutputStream();
DataOutputStream fsOut = new DataOutputStream(os);
if (LOG.isDebugEnabled()) {
LOG.debug("Storing ZKDTSMDelegationKey_" + key.getKeyId());
}
key.write(fsOut);
try {
if (zkClient.checkExists().forPath(nodeCreatePath) != null) {
        zkClient.setData().withVersion(-1)
            .forPath(nodeCreatePath, os.toByteArray());
if (!isUpdate) {
LOG.debug("Key with path [" + nodeCreatePath
+ "] already exists.. Updating !!");
}
} else {
zkClient.create().withMode(CreateMode.PERSISTENT)
.forPath(nodeCreatePath, os.toByteArray());
if (isUpdate) {
LOG.debug("Updating non existent Key path [" + nodeCreatePath
+ "].. Adding new !!");
}
}
} catch (KeeperException.NodeExistsException ne) {
LOG.debug(nodeCreatePath + " znode already exists !!");
} catch (Exception ex) {
throw new IOException(ex);
} finally {
os.close();
}
}
@Override
protected void removeStoredMasterKey(DelegationKey key) {
String nodeRemovePath =
getNodePath(ZK_DTSM_MASTER_KEY_ROOT,
DELEGATION_KEY_PREFIX + key.getKeyId());
if (LOG.isDebugEnabled()) {
LOG.debug("Removing ZKDTSMDelegationKey_" + key.getKeyId());
}
try {
if (zkClient.checkExists().forPath(nodeRemovePath) != null) {
while(zkClient.checkExists().forPath(nodeRemovePath) != null){
try {
zkClient.delete().guaranteed().forPath(nodeRemovePath);
} catch (NoNodeException nne) {
// It is possible that the node might be deleted between the
// check and the actual delete.. which might lead to an
// exception that can bring down the daemon running this
// SecretManager
LOG.debug("Node already deleted by peer " + nodeRemovePath);
}
}
} else {
LOG.debug("Attempted to delete a non-existing znode " + nodeRemovePath);
}
} catch (Exception e) {
LOG.debug(nodeRemovePath + " znode could not be removed!!");
}
}
@Override
protected void storeToken(TokenIdent ident,
DelegationTokenInformation tokenInfo) throws IOException {
try {
addOrUpdateToken(ident, tokenInfo, false);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
protected void updateToken(TokenIdent ident,
DelegationTokenInformation tokenInfo) throws IOException {
String nodeRemovePath =
getNodePath(ZK_DTSM_TOKENS_ROOT, DELEGATION_TOKEN_PREFIX
+ ident.getSequenceNumber());
try {
if (zkClient.checkExists().forPath(nodeRemovePath) != null) {
addOrUpdateToken(ident, tokenInfo, true);
} else {
addOrUpdateToken(ident, tokenInfo, false);
LOG.debug("Attempted to update a non-existing znode " + nodeRemovePath);
}
} catch (Exception e) {
throw new RuntimeException("Could not update Stored Token ZKDTSMDelegationToken_"
+ ident.getSequenceNumber(), e);
}
}
@Override
protected void removeStoredToken(TokenIdent ident)
throws IOException {
String nodeRemovePath =
getNodePath(ZK_DTSM_TOKENS_ROOT, DELEGATION_TOKEN_PREFIX
+ ident.getSequenceNumber());
if (LOG.isDebugEnabled()) {
LOG.debug("Removing ZKDTSMDelegationToken_"
+ ident.getSequenceNumber());
}
try {
if (zkClient.checkExists().forPath(nodeRemovePath) != null) {
while(zkClient.checkExists().forPath(nodeRemovePath) != null){
try {
zkClient.delete().guaranteed().forPath(nodeRemovePath);
} catch (NoNodeException nne) {
// It is possible that the node might be deleted between the
// check and the actual delete.. which might lead to an
// exception that can bring down the daemon running this
// SecretManager
LOG.debug("Node already deleted by peer " + nodeRemovePath);
}
}
} else {
LOG.debug("Attempted to remove a non-existing znode " + nodeRemovePath);
}
} catch (Exception e) {
throw new RuntimeException(
"Could not remove Stored Token ZKDTSMDelegationToken_"
+ ident.getSequenceNumber(), e);
}
}
@Override
public synchronized TokenIdent cancelToken(Token<TokenIdent> token,
String canceller) throws IOException {
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream in = new DataInputStream(buf);
TokenIdent id = createIdentifier();
id.readFields(in);
try {
if (!currentTokens.containsKey(id)) {
// See if token can be retrieved and placed in currentTokens
getTokenInfo(id);
}
return super.cancelToken(token, canceller);
} catch (Exception e) {
LOG.error("Exception while checking if token exist !!", e);
return id;
}
}
private void addOrUpdateToken(TokenIdent ident,
DelegationTokenInformation info, boolean isUpdate) throws Exception {
String nodeCreatePath =
getNodePath(ZK_DTSM_TOKENS_ROOT, DELEGATION_TOKEN_PREFIX
+ ident.getSequenceNumber());
    ByteArrayOutputStream tokenOs = new ByteArrayOutputStream();
    DataOutputStream tokenOut = new DataOutputStream(tokenOs);
try {
ident.write(tokenOut);
tokenOut.writeLong(info.getRenewDate());
tokenOut.writeInt(info.getPassword().length);
tokenOut.write(info.getPassword());
if (LOG.isDebugEnabled()) {
LOG.debug((isUpdate ? "Updating " : "Storing ")
+ "ZKDTSMDelegationToken_" +
ident.getSequenceNumber());
}
if (isUpdate) {
        // As above, the version must be supplied via withVersion(); calling
        // setVersion() on the returned Stat would be a no-op.
        zkClient.setData().withVersion(-1).forPath(nodeCreatePath,
            tokenOs.toByteArray());
} else {
zkClient.create().withMode(CreateMode.PERSISTENT)
.forPath(nodeCreatePath, tokenOs.toByteArray());
}
    } finally {
      // close the stream that was actually written to
      tokenOut.close();
    }
}
/**
   * Simple implementation of an {@link ACLProvider} that returns an ACL
   * granting all permissions to a single principal only.
*/
private static class SASLOwnerACLProvider implements ACLProvider {
private final List<ACL> saslACL;
private SASLOwnerACLProvider(String principal) {
this.saslACL = Collections.singletonList(
new ACL(Perms.ALL, new Id("sasl", principal)));
}
@Override
public List<ACL> getDefaultAcl() {
return saslACL;
}
@Override
public List<ACL> getAclForPath(String path) {
return saslACL;
}
}
@VisibleForTesting
@Private
@Unstable
static String getNodePath(String root, String nodeName) {
return (root + "/" + nodeName);
}
@VisibleForTesting
public ExecutorService getListenerThreadPool() {
return listenerThreadPool;
}
}
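// Illustrative sketch (not part of the Hadoop sources): the byte layout that
// addOrUpdateToken() above writes under ZK_DTSM_TOKENS_ROOT and that
// getTokenInfoFromZK() parses back. The identifier placeholder and sample
// values below are assumptions made purely for demonstration.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class TokenZNodeLayoutDemo {
  public static void main(String[] args) throws IOException {
    // Stands in for ident.write(out); a real identifier writes its own fields.
    byte[] identBytes = "example-identifier".getBytes(StandardCharsets.UTF_8);
    long renewDate = System.currentTimeMillis() + 24L * 60 * 60 * 1000;
    byte[] password = {1, 2, 3, 4};

    // Encode exactly as addOrUpdateToken() does.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    out.write(identBytes);
    out.writeLong(renewDate);
    out.writeInt(password.length);
    out.write(password);
    byte[] znodeData = bos.toByteArray();  // what the znode would hold

    // Decode as getTokenInfoFromZK() does: identifier first, then renew date,
    // then the length-prefixed password.
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(znodeData));
    byte[] identCopy = new byte[identBytes.length];
    in.readFully(identCopy);  // stands in for createIdentifier().readFields(in)
    long readRenewDate = in.readLong();
    byte[] readPassword = new byte[in.readInt()];
    in.readFully(readPassword);
    System.out.println("renew date round-trips: " + (readRenewDate == renewDate));
  }
}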
| 32,166 | 35.102132 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import javax.crypto.SecretKey;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.HadoopKerberosName;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.Time;
import com.google.common.base.Preconditions;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "Hive"})
@InterfaceStability.Evolving
public abstract
class AbstractDelegationTokenSecretManager<TokenIdent
extends AbstractDelegationTokenIdentifier>
extends SecretManager<TokenIdent> {
private static final Log LOG = LogFactory
.getLog(AbstractDelegationTokenSecretManager.class);
/**
* Cache of currently valid tokens, mapping from DelegationTokenIdentifier
* to DelegationTokenInformation. Protected by this object lock.
*/
protected final Map<TokenIdent, DelegationTokenInformation> currentTokens
= new HashMap<TokenIdent, DelegationTokenInformation>();
/**
* Sequence number to create DelegationTokenIdentifier.
* Protected by this object lock.
*/
protected int delegationTokenSequenceNumber = 0;
/**
* Access to allKeys is protected by this object lock
*/
protected final Map<Integer, DelegationKey> allKeys
= new HashMap<Integer, DelegationKey>();
/**
* Access to currentId is protected by this object lock.
*/
protected int currentId = 0;
/**
* Access to currentKey is protected by this object lock
*/
private DelegationKey currentKey;
private long keyUpdateInterval;
private long tokenMaxLifetime;
private long tokenRemoverScanInterval;
private long tokenRenewInterval;
/**
* Whether to store a token's tracking ID in its TokenInformation.
* Can be overridden by a subclass.
*/
protected boolean storeTokenTrackingId;
private Thread tokenRemoverThread;
protected volatile boolean running;
/**
* If the delegation token update thread holds this lock, it will
* not get interrupted.
*/
protected Object noInterruptsLock = new Object();
/**
* Create a secret manager
* @param delegationKeyUpdateInterval the number of milliseconds for rolling
* new secret keys.
* @param delegationTokenMaxLifetime the maximum lifetime of the delegation
* tokens in milliseconds
* @param delegationTokenRenewInterval how often the tokens must be renewed
* in milliseconds
* @param delegationTokenRemoverScanInterval how often the tokens are scanned
* for expired tokens in milliseconds
*/
public AbstractDelegationTokenSecretManager(long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime, long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
this.keyUpdateInterval = delegationKeyUpdateInterval;
this.tokenMaxLifetime = delegationTokenMaxLifetime;
this.tokenRenewInterval = delegationTokenRenewInterval;
this.tokenRemoverScanInterval = delegationTokenRemoverScanInterval;
this.storeTokenTrackingId = false;
}
/** should be called before this object is used */
public void startThreads() throws IOException {
Preconditions.checkState(!running);
updateCurrentKey();
synchronized (this) {
running = true;
tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
tokenRemoverThread.start();
}
}
/**
* Reset all data structures and mutable state.
*/
public synchronized void reset() {
setCurrentKeyId(0);
allKeys.clear();
setDelegationTokenSeqNum(0);
currentTokens.clear();
}
/**
* Add a previously used master key to cache (when NN restarts),
* should be called before activate().
* */
public synchronized void addKey(DelegationKey key) throws IOException {
if (running) // a safety check
throw new IOException("Can't add delegation key to a running SecretManager.");
if (key.getKeyId() > getCurrentKeyId()) {
setCurrentKeyId(key.getKeyId());
}
allKeys.put(key.getKeyId(), key);
}
public synchronized DelegationKey[] getAllKeys() {
return allKeys.values().toArray(new DelegationKey[0]);
}
// HDFS
protected void logUpdateMasterKey(DelegationKey key) throws IOException {
return;
}
// HDFS
protected void logExpireToken(TokenIdent ident) throws IOException {
return;
}
// RM
protected void storeNewMasterKey(DelegationKey key) throws IOException {
return;
}
// RM
protected void removeStoredMasterKey(DelegationKey key) {
return;
}
// RM
protected void storeNewToken(TokenIdent ident, long renewDate) throws IOException{
return;
}
// RM
protected void removeStoredToken(TokenIdent ident) throws IOException {
}
// RM
protected void updateStoredToken(TokenIdent ident, long renewDate) throws IOException {
return;
}
/**
* For subclasses externalizing the storage, for example Zookeeper
* based implementations
*/
protected synchronized int getCurrentKeyId() {
return currentId;
}
/**
* For subclasses externalizing the storage, for example Zookeeper
* based implementations
*/
protected synchronized int incrementCurrentKeyId() {
return ++currentId;
}
/**
* For subclasses externalizing the storage, for example Zookeeper
* based implementations
*/
protected synchronized void setCurrentKeyId(int keyId) {
currentId = keyId;
}
/**
* For subclasses externalizing the storage, for example Zookeeper
* based implementations
*/
protected synchronized int getDelegationTokenSeqNum() {
return delegationTokenSequenceNumber;
}
/**
* For subclasses externalizing the storage, for example Zookeeper
* based implementations
*/
protected synchronized int incrementDelegationTokenSeqNum() {
return ++delegationTokenSequenceNumber;
}
/**
* For subclasses externalizing the storage, for example Zookeeper
* based implementations
*/
protected synchronized void setDelegationTokenSeqNum(int seqNum) {
delegationTokenSequenceNumber = seqNum;
}
/**
* For subclasses externalizing the storage, for example Zookeeper
* based implementations
*/
protected DelegationKey getDelegationKey(int keyId) {
return allKeys.get(keyId);
}
/**
* For subclasses externalizing the storage, for example Zookeeper
* based implementations
*/
protected void storeDelegationKey(DelegationKey key) throws IOException {
allKeys.put(key.getKeyId(), key);
storeNewMasterKey(key);
}
/**
* For subclasses externalizing the storage, for example Zookeeper
* based implementations
*/
protected void updateDelegationKey(DelegationKey key) throws IOException {
allKeys.put(key.getKeyId(), key);
}
/**
* For subclasses externalizing the storage, for example Zookeeper
* based implementations
*/
protected DelegationTokenInformation getTokenInfo(TokenIdent ident) {
return currentTokens.get(ident);
}
/**
* For subclasses externalizing the storage, for example Zookeeper
* based implementations
*/
protected void storeToken(TokenIdent ident,
DelegationTokenInformation tokenInfo) throws IOException {
currentTokens.put(ident, tokenInfo);
storeNewToken(ident, tokenInfo.getRenewDate());
}
/**
* For subclasses externalizing the storage, for example Zookeeper
* based implementations
*/
protected void updateToken(TokenIdent ident,
DelegationTokenInformation tokenInfo) throws IOException {
currentTokens.put(ident, tokenInfo);
updateStoredToken(ident, tokenInfo.getRenewDate());
}
/**
   * This method is intended to be used for recovering persisted delegation
   * tokens. It must be called before this secret manager is activated (that
   * is, before startThreads() is called).
* @param identifier identifier read from persistent storage
* @param renewDate token renew time
* @throws IOException
*/
public synchronized void addPersistedDelegationToken(
TokenIdent identifier, long renewDate) throws IOException {
if (running) {
// a safety check
throw new IOException(
"Can't add persisted delegation token to a running SecretManager.");
}
int keyId = identifier.getMasterKeyId();
DelegationKey dKey = allKeys.get(keyId);
if (dKey == null) {
LOG.warn("No KEY found for persisted identifier " + identifier.toString());
return;
}
byte[] password = createPassword(identifier.getBytes(), dKey.getKey());
if (identifier.getSequenceNumber() > getDelegationTokenSeqNum()) {
setDelegationTokenSeqNum(identifier.getSequenceNumber());
}
if (getTokenInfo(identifier) == null) {
currentTokens.put(identifier, new DelegationTokenInformation(renewDate,
password, getTrackingIdIfEnabled(identifier)));
} else {
throw new IOException("Same delegation token being added twice.");
}
}
/**
* Update the current master key
* This is called once by startThreads before tokenRemoverThread is created,
* and only by tokenRemoverThread afterwards.
*/
private void updateCurrentKey() throws IOException {
LOG.info("Updating the current master key for generating delegation tokens");
/* Create a new currentKey with an estimated expiry date. */
int newCurrentId;
synchronized (this) {
newCurrentId = incrementCurrentKeyId();
}
DelegationKey newKey = new DelegationKey(newCurrentId, System
.currentTimeMillis()
+ keyUpdateInterval + tokenMaxLifetime, generateSecret());
//Log must be invoked outside the lock on 'this'
logUpdateMasterKey(newKey);
synchronized (this) {
currentKey = newKey;
storeDelegationKey(currentKey);
}
}
/**
* Update the current master key for generating delegation tokens
* It should be called only by tokenRemoverThread.
*/
void rollMasterKey() throws IOException {
synchronized (this) {
removeExpiredKeys();
/* set final expiry date for retiring currentKey */
currentKey.setExpiryDate(Time.now() + tokenMaxLifetime);
/*
* currentKey might have been removed by removeExpiredKeys(), if
     * rollMasterKey() isn't called at the expected interval. Add it back to
* allKeys just in case.
*/
updateDelegationKey(currentKey);
}
updateCurrentKey();
}
private synchronized void removeExpiredKeys() {
long now = Time.now();
for (Iterator<Map.Entry<Integer, DelegationKey>> it = allKeys.entrySet()
.iterator(); it.hasNext();) {
Map.Entry<Integer, DelegationKey> e = it.next();
if (e.getValue().getExpiryDate() < now) {
it.remove();
// ensure the tokens generated by this current key can be recovered
// with this current key after this current key is rolled
if(!e.getValue().equals(currentKey))
removeStoredMasterKey(e.getValue());
}
}
}
@Override
protected synchronized byte[] createPassword(TokenIdent identifier) {
int sequenceNum;
long now = Time.now();
sequenceNum = incrementDelegationTokenSeqNum();
identifier.setIssueDate(now);
identifier.setMaxDate(now + tokenMaxLifetime);
identifier.setMasterKeyId(currentKey.getKeyId());
identifier.setSequenceNumber(sequenceNum);
LOG.info("Creating password for identifier: " + identifier
+ ", currentKey: " + currentKey.getKeyId());
byte[] password = createPassword(identifier.getBytes(), currentKey.getKey());
DelegationTokenInformation tokenInfo = new DelegationTokenInformation(now
+ tokenRenewInterval, password, getTrackingIdIfEnabled(identifier));
try {
storeToken(identifier, tokenInfo);
} catch (IOException ioe) {
LOG.error("Could not store token !!", ioe);
}
return password;
}
/**
   * Find the DelegationTokenInformation for the given token id, and verify
   * that the token has not expired. Note that this method must be called
   * while holding the secret manager's monitor.
*/
protected DelegationTokenInformation checkToken(TokenIdent identifier)
throws InvalidToken {
assert Thread.holdsLock(this);
DelegationTokenInformation info = getTokenInfo(identifier);
if (info == null) {
throw new InvalidToken("token (" + identifier.toString()
+ ") can't be found in cache");
}
if (info.getRenewDate() < Time.now()) {
throw new InvalidToken("token (" + identifier.toString() + ") is expired");
}
return info;
}
@Override
public synchronized byte[] retrievePassword(TokenIdent identifier)
throws InvalidToken {
return checkToken(identifier).getPassword();
}
protected String getTrackingIdIfEnabled(TokenIdent ident) {
if (storeTokenTrackingId) {
return ident.getTrackingId();
}
return null;
}
public synchronized String getTokenTrackingId(TokenIdent identifier) {
DelegationTokenInformation info = getTokenInfo(identifier);
if (info == null) {
return null;
}
return info.getTrackingId();
}
/**
* Verifies that the given identifier and password are valid and match.
* @param identifier Token identifier.
* @param password Password in the token.
* @throws InvalidToken
*/
public synchronized void verifyToken(TokenIdent identifier, byte[] password)
throws InvalidToken {
byte[] storedPassword = retrievePassword(identifier);
if (!Arrays.equals(password, storedPassword)) {
throw new InvalidToken("token (" + identifier
+ ") is invalid, password doesn't match");
}
}
/**
* Renew a delegation token.
* @param token the token to renew
* @param renewer the full principal name of the user doing the renewal
* @return the new expiration time
* @throws InvalidToken if the token is invalid
* @throws AccessControlException if the user can't renew token
*/
public synchronized long renewToken(Token<TokenIdent> token,
String renewer) throws InvalidToken, IOException {
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream in = new DataInputStream(buf);
TokenIdent id = createIdentifier();
id.readFields(in);
LOG.info("Token renewal for identifier: " + id + "; total currentTokens "
+ currentTokens.size());
long now = Time.now();
if (id.getMaxDate() < now) {
throw new InvalidToken(renewer + " tried to renew an expired token");
}
if ((id.getRenewer() == null) || (id.getRenewer().toString().isEmpty())) {
throw new AccessControlException(renewer +
" tried to renew a token without a renewer");
}
if (!id.getRenewer().toString().equals(renewer)) {
throw new AccessControlException(renewer +
" tries to renew a token with renewer " + id.getRenewer());
}
DelegationKey key = getDelegationKey(id.getMasterKeyId());
if (key == null) {
throw new InvalidToken("Unable to find master key for keyId="
+ id.getMasterKeyId()
+ " from cache. Failed to renew an unexpired token"
+ " with sequenceNumber=" + id.getSequenceNumber());
}
byte[] password = createPassword(token.getIdentifier(), key.getKey());
if (!Arrays.equals(password, token.getPassword())) {
throw new AccessControlException(renewer +
" is trying to renew a token with wrong password");
}
long renewTime = Math.min(id.getMaxDate(), now + tokenRenewInterval);
String trackingId = getTrackingIdIfEnabled(id);
DelegationTokenInformation info = new DelegationTokenInformation(renewTime,
password, trackingId);
if (getTokenInfo(id) == null) {
throw new InvalidToken("Renewal request for unknown token");
}
updateToken(id, info);
return renewTime;
}
/**
* Cancel a token by removing it from cache.
* @return Identifier of the canceled token
* @throws InvalidToken for invalid token
* @throws AccessControlException if the user isn't allowed to cancel
*/
public synchronized TokenIdent cancelToken(Token<TokenIdent> token,
String canceller) throws IOException {
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream in = new DataInputStream(buf);
TokenIdent id = createIdentifier();
id.readFields(in);
LOG.info("Token cancelation requested for identifier: "+id);
if (id.getUser() == null) {
throw new InvalidToken("Token with no owner");
}
String owner = id.getUser().getUserName();
Text renewer = id.getRenewer();
HadoopKerberosName cancelerKrbName = new HadoopKerberosName(canceller);
String cancelerShortName = cancelerKrbName.getShortName();
if (!canceller.equals(owner)
&& (renewer == null || renewer.toString().isEmpty() || !cancelerShortName
.equals(renewer.toString()))) {
throw new AccessControlException(canceller
+ " is not authorized to cancel the token");
}
DelegationTokenInformation info = currentTokens.remove(id);
if (info == null) {
throw new InvalidToken("Token not found");
}
removeStoredToken(id);
return id;
}
/**
* Convert the byte[] to a secret key
* @param key the byte[] to create the secret key from
* @return the secret key
*/
public static SecretKey createSecretKey(byte[] key) {
return SecretManager.createSecretKey(key);
}
/** Class to encapsulate a token's renew date and password. */
@InterfaceStability.Evolving
public static class DelegationTokenInformation {
long renewDate;
byte[] password;
String trackingId;
public DelegationTokenInformation(long renewDate, byte[] password) {
this(renewDate, password, null);
}
public DelegationTokenInformation(long renewDate, byte[] password,
String trackingId) {
this.renewDate = renewDate;
this.password = password;
this.trackingId = trackingId;
}
/** returns renew date */
public long getRenewDate() {
return renewDate;
}
/** returns password */
byte[] getPassword() {
return password;
}
/** returns tracking id */
public String getTrackingId() {
return trackingId;
}
}
/** Remove expired delegation tokens from cache */
private void removeExpiredToken() throws IOException {
long now = Time.now();
Set<TokenIdent> expiredTokens = new HashSet<TokenIdent>();
synchronized (this) {
Iterator<Map.Entry<TokenIdent, DelegationTokenInformation>> i =
currentTokens.entrySet().iterator();
while (i.hasNext()) {
Map.Entry<TokenIdent, DelegationTokenInformation> entry = i.next();
long renewDate = entry.getValue().getRenewDate();
if (renewDate < now) {
expiredTokens.add(entry.getKey());
i.remove();
}
}
}
// don't hold lock on 'this' to avoid edit log updates blocking token ops
for (TokenIdent ident : expiredTokens) {
logExpireToken(ident);
removeStoredToken(ident);
}
}
public void stopThreads() {
if (LOG.isDebugEnabled())
LOG.debug("Stopping expired delegation token remover thread");
running = false;
if (tokenRemoverThread != null) {
synchronized (noInterruptsLock) {
tokenRemoverThread.interrupt();
}
try {
tokenRemoverThread.join();
} catch (InterruptedException e) {
throw new RuntimeException(
"Unable to join on token removal thread", e);
}
}
}
/**
   * Is the secret manager running?
   * @return true if the secret manager is running
*/
public synchronized boolean isRunning() {
return running;
}
private class ExpiredTokenRemover extends Thread {
private long lastMasterKeyUpdate;
private long lastTokenCacheCleanup;
@Override
public void run() {
LOG.info("Starting expired delegation token remover thread, "
+ "tokenRemoverScanInterval=" + tokenRemoverScanInterval
/ (60 * 1000) + " min(s)");
try {
while (running) {
long now = Time.now();
if (lastMasterKeyUpdate + keyUpdateInterval < now) {
try {
rollMasterKey();
lastMasterKeyUpdate = now;
} catch (IOException e) {
LOG.error("Master key updating failed: ", e);
}
}
if (lastTokenCacheCleanup + tokenRemoverScanInterval < now) {
removeExpiredToken();
lastTokenCacheCleanup = now;
}
try {
            Thread.sleep(Math.min(5000, keyUpdateInterval)); // at most 5 seconds
} catch (InterruptedException ie) {
LOG.error("ExpiredTokenRemover received " + ie);
}
}
} catch (Throwable t) {
LOG.error("ExpiredTokenRemover thread received unexpected exception", t);
Runtime.getRuntime().exit(-1);
}
}
}
/**
* Decode the token identifier. The subclass can customize the way to decode
* the token identifier.
*
* @param token the token where to extract the identifier
* @return the delegation token identifier
* @throws IOException
*/
public TokenIdent decodeTokenIdentifier(Token<TokenIdent> token) throws IOException {
return token.decodeIdentifier();
}
}
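// Illustrative sketch (not part of the Hadoop sources): a minimal concrete
// secret manager and its typical lifecycle. DemoIdentifier and the kind name
// "DEMO_TOKEN" are assumptions made for this example only; the timing values
// mirror common defaults but are otherwise arbitrary.
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;

public class DemoSecretManager extends
    AbstractDelegationTokenSecretManager<DemoSecretManager.DemoIdentifier> {

  public static class DemoIdentifier extends AbstractDelegationTokenIdentifier {
    public static final Text KIND = new Text("DEMO_TOKEN");
    public DemoIdentifier() {
      super();
    }
    public DemoIdentifier(Text owner, Text renewer, Text realUser) {
      super(owner, renewer, realUser);
    }
    @Override
    public Text getKind() {
      return KIND;
    }
  }

  public DemoSecretManager() {
    // key roll: 1 day, max token life: 7 days, renew interval: 1 day,
    // expired-token scan: 1 hour (all in milliseconds).
    super(24 * 60 * 60 * 1000L, 7 * 24 * 60 * 60 * 1000L,
        24 * 60 * 60 * 1000L, 60 * 60 * 1000L);
  }

  @Override
  public DemoIdentifier createIdentifier() {
    return new DemoIdentifier();
  }

  public static void main(String[] args) throws Exception {
    DemoSecretManager mgr = new DemoSecretManager();
    mgr.startThreads();  // rolls the first master key, starts the remover
    DemoIdentifier id = new DemoIdentifier(
        new Text("alice"), new Text("bob"), new Text("alice"));
    // The Token constructor calls mgr.createPassword(id), which assigns the
    // sequence number and master key id and caches the token.
    Token<DemoIdentifier> token = new Token<DemoIdentifier>(id, mgr);
    mgr.verifyToken(id, token.getPassword());  // throws InvalidToken on mismatch
    mgr.cancelToken(token, "alice");           // the owner may always cancel
    mgr.stopThreads();
  }
}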
| 22,804 | 31.907648 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSelector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation;
import java.util.Collection;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenSelector;
/**
* Look through tokens to find the first delegation token that matches the
* service and return it.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public
class AbstractDelegationTokenSelector<TokenIdent
extends AbstractDelegationTokenIdentifier>
implements TokenSelector<TokenIdent> {
private Text kindName;
protected AbstractDelegationTokenSelector(Text kindName) {
this.kindName = kindName;
}
@SuppressWarnings("unchecked")
@Override
public Token<TokenIdent> selectToken(Text service,
Collection<Token<? extends TokenIdentifier>> tokens) {
if (service == null) {
return null;
}
for (Token<? extends TokenIdentifier> token : tokens) {
if (kindName.equals(token.getKind())
&& service.equals(token.getService())) {
return (Token<TokenIdent>) token;
}
}
return null;
}
}
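// Illustrative sketch (not part of the Hadoop sources): a concrete selector
// for the DemoIdentifier sketched earlier. The kind passed to the superclass
// must match the identifier's getKind(); the service text is whatever the
// client stored on the token.
import java.util.Collection;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class DemoTokenSelector
    extends AbstractDelegationTokenSelector<DemoSecretManager.DemoIdentifier> {

  public DemoTokenSelector() {
    super(new Text("DEMO_TOKEN"));
  }

  // Picks this service's token, if any, out of a credential set.
  public static Token<DemoSecretManager.DemoIdentifier> pick(
      Text service, Credentials creds) {
    Collection<Token<? extends TokenIdentifier>> all = creds.getAllTokens();
    return new DemoTokenSelector().selectToken(service, all);
  }
}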
| 2,113 | 33.096774 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.security.HadoopKerberosName;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.TokenIdentifier;
import com.google.common.annotations.VisibleForTesting;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public abstract class AbstractDelegationTokenIdentifier
extends TokenIdentifier {
private static final byte VERSION = 0;
private Text owner;
private Text renewer;
private Text realUser;
private long issueDate;
private long maxDate;
private int sequenceNumber;
private int masterKeyId = 0;
public AbstractDelegationTokenIdentifier() {
this(new Text(), new Text(), new Text());
}
public AbstractDelegationTokenIdentifier(Text owner, Text renewer, Text realUser) {
setOwner(owner);
setRenewer(renewer);
setRealUser(realUser);
issueDate = 0;
maxDate = 0;
}
@Override
public abstract Text getKind();
/**
* Get the username encoded in the token identifier
*
* @return the username or owner
*/
@Override
public UserGroupInformation getUser() {
if ( (owner == null) || (owner.toString().isEmpty())) {
return null;
}
final UserGroupInformation realUgi;
final UserGroupInformation ugi;
if ((realUser == null) || (realUser.toString().isEmpty())
|| realUser.equals(owner)) {
ugi = realUgi = UserGroupInformation.createRemoteUser(owner.toString());
} else {
realUgi = UserGroupInformation.createRemoteUser(realUser.toString());
ugi = UserGroupInformation.createProxyUser(owner.toString(), realUgi);
}
realUgi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
return ugi;
}
public Text getOwner() {
return owner;
}
public void setOwner(Text owner) {
if (owner == null) {
this.owner = new Text();
} else {
this.owner = owner;
}
}
public Text getRenewer() {
return renewer;
}
public void setRenewer(Text renewer) {
if (renewer == null) {
this.renewer = new Text();
} else {
HadoopKerberosName renewerKrbName = new HadoopKerberosName(renewer.toString());
try {
this.renewer = new Text(renewerKrbName.getShortName());
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
public Text getRealUser() {
return realUser;
}
public void setRealUser(Text realUser) {
if (realUser == null) {
this.realUser = new Text();
} else {
this.realUser = realUser;
}
}
public void setIssueDate(long issueDate) {
this.issueDate = issueDate;
}
public long getIssueDate() {
return issueDate;
}
public void setMaxDate(long maxDate) {
this.maxDate = maxDate;
}
public long getMaxDate() {
return maxDate;
}
public void setSequenceNumber(int seqNum) {
this.sequenceNumber = seqNum;
}
public int getSequenceNumber() {
return sequenceNumber;
}
public void setMasterKeyId(int newId) {
masterKeyId = newId;
}
public int getMasterKeyId() {
return masterKeyId;
}
protected static boolean isEqual(Object a, Object b) {
return a == null ? b == null : a.equals(b);
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj instanceof AbstractDelegationTokenIdentifier) {
AbstractDelegationTokenIdentifier that = (AbstractDelegationTokenIdentifier) obj;
return this.sequenceNumber == that.sequenceNumber
&& this.issueDate == that.issueDate
&& this.maxDate == that.maxDate
&& this.masterKeyId == that.masterKeyId
&& isEqual(this.owner, that.owner)
&& isEqual(this.renewer, that.renewer)
&& isEqual(this.realUser, that.realUser);
}
return false;
}
@Override
public int hashCode() {
return this.sequenceNumber;
}
@Override
public void readFields(DataInput in) throws IOException {
byte version = in.readByte();
if (version != VERSION) {
throw new IOException("Unknown version of delegation token " +
version);
}
owner.readFields(in, Text.DEFAULT_MAX_LEN);
renewer.readFields(in, Text.DEFAULT_MAX_LEN);
realUser.readFields(in, Text.DEFAULT_MAX_LEN);
issueDate = WritableUtils.readVLong(in);
maxDate = WritableUtils.readVLong(in);
sequenceNumber = WritableUtils.readVInt(in);
masterKeyId = WritableUtils.readVInt(in);
}
@VisibleForTesting
void writeImpl(DataOutput out) throws IOException {
out.writeByte(VERSION);
owner.write(out);
renewer.write(out);
realUser.write(out);
WritableUtils.writeVLong(out, issueDate);
WritableUtils.writeVLong(out, maxDate);
WritableUtils.writeVInt(out, sequenceNumber);
WritableUtils.writeVInt(out, masterKeyId);
}
@Override
public void write(DataOutput out) throws IOException {
if (owner.getLength() > Text.DEFAULT_MAX_LEN) {
throw new IOException("owner is too long to be serialized!");
}
if (renewer.getLength() > Text.DEFAULT_MAX_LEN) {
throw new IOException("renewer is too long to be serialized!");
}
if (realUser.getLength() > Text.DEFAULT_MAX_LEN) {
throw new IOException("realuser is too long to be serialized!");
}
writeImpl(out);
}
@Override
public String toString() {
    StringBuilder buffer = new StringBuilder();
    buffer.append("owner=").append(owner)
        .append(", renewer=").append(renewer)
        .append(", realUser=").append(realUser)
        .append(", issueDate=").append(issueDate)
        .append(", maxDate=").append(maxDate)
        .append(", sequenceNumber=").append(sequenceNumber)
        .append(", masterKeyId=").append(masterKeyId);
    return buffer.toString();
}
}
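// Illustrative sketch (not part of the Hadoop sources): the Writable
// round-trip defined above writes a version byte, the three Text fields, and
// then the vlong/vint numeric fields. DemoIdentifier is the subclass sketched
// earlier; the field values are arbitrary.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;

public class IdentifierRoundTripDemo {
  public static void main(String[] args) throws IOException {
    DemoSecretManager.DemoIdentifier id = new DemoSecretManager.DemoIdentifier(
        new Text("alice"), new Text("bob"), new Text("alice"));
    id.setIssueDate(System.currentTimeMillis());
    id.setMaxDate(id.getIssueDate() + 7L * 24 * 60 * 60 * 1000);
    id.setSequenceNumber(42);
    id.setMasterKeyId(7);

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    id.write(new DataOutputStream(bos));  // length checks, then writeImpl()

    DemoSecretManager.DemoIdentifier copy = new DemoSecretManager.DemoIdentifier();
    copy.readFields(
        new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
    System.out.println(id.equals(copy));  // true: all compared fields match
  }
}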
| 6,965 | 27.785124 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/DelegationKey.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import javax.crypto.SecretKey;
import org.apache.avro.reflect.Nullable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
/**
* Key used for generating and verifying delegation tokens
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class DelegationKey implements Writable {
private int keyId;
private long expiryDate;
@Nullable
private byte[] keyBytes = null;
private static final int MAX_KEY_LEN = 1024 * 1024;
  /** Default constructor, required for Writable. */
public DelegationKey() {
this(0, 0L, (SecretKey)null);
}
public DelegationKey(int keyId, long expiryDate, SecretKey key) {
this(keyId, expiryDate, key != null ? key.getEncoded() : null);
}
public DelegationKey(int keyId, long expiryDate, byte[] encodedKey) {
this.keyId = keyId;
this.expiryDate = expiryDate;
if (encodedKey != null) {
if (encodedKey.length > MAX_KEY_LEN) {
throw new RuntimeException("can't create " + encodedKey.length +
" byte long DelegationKey.");
}
this.keyBytes = encodedKey;
}
}
public int getKeyId() {
return keyId;
}
public long getExpiryDate() {
return expiryDate;
}
public SecretKey getKey() {
if (keyBytes == null || keyBytes.length == 0) {
return null;
} else {
SecretKey key = AbstractDelegationTokenSecretManager.createSecretKey(keyBytes);
return key;
}
}
public byte[] getEncodedKey() {
return keyBytes;
}
public void setExpiryDate(long expiryDate) {
this.expiryDate = expiryDate;
}
  /**
   * Serialize this key to the given output.
   */
@Override
public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, keyId);
WritableUtils.writeVLong(out, expiryDate);
if (keyBytes == null) {
WritableUtils.writeVInt(out, -1);
} else {
WritableUtils.writeVInt(out, keyBytes.length);
out.write(keyBytes);
}
}
  /**
   * Deserialize this key from the given input.
   */
@Override
public void readFields(DataInput in) throws IOException {
keyId = WritableUtils.readVInt(in);
expiryDate = WritableUtils.readVLong(in);
int len = WritableUtils.readVIntInRange(in, -1, MAX_KEY_LEN);
if (len == -1) {
keyBytes = null;
} else {
keyBytes = new byte[len];
in.readFully(keyBytes);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + (int) (expiryDate ^ (expiryDate >>> 32));
result = prime * result + Arrays.hashCode(keyBytes);
result = prime * result + keyId;
return result;
}
@Override
public boolean equals(Object right) {
if (this == right) {
return true;
} else if (right == null || getClass() != right.getClass()) {
return false;
} else {
DelegationKey r = (DelegationKey) right;
return keyId == r.keyId &&
expiryDate == r.expiryDate &&
Arrays.equals(keyBytes, r.keyBytes);
}
}
}
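// Illustrative sketch (not part of the Hadoop sources): a DelegationKey
// Writable round-trip. The raw key bytes below are placeholders; real keys
// come from SecretManager.generateSecret().
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class DelegationKeyDemo {
  public static void main(String[] args) throws IOException {
    DelegationKey key = new DelegationKey(1,
        System.currentTimeMillis() + 24L * 60 * 60 * 1000,
        new byte[] {10, 20, 30, 40});

    // write() emits a vint key id, a vlong expiry date, and the
    // length-prefixed key bytes (a length of -1 marks a null key).
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    key.write(new DataOutputStream(bos));

    DelegationKey copy = new DelegationKey();
    copy.readFields(
        new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
    System.out.println(key.equals(copy));  // true
  }
}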
| 4,076 | 26.924658 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenIdentifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
/**
* Concrete delegation token identifier used by {@link DelegationTokenManager},
* {@link KerberosDelegationTokenAuthenticationHandler} and
* {@link DelegationTokenAuthenticationFilter}.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DelegationTokenIdentifier
extends AbstractDelegationTokenIdentifier {
private Text kind;
public DelegationTokenIdentifier(Text kind) {
this.kind = kind;
}
/**
* Create a new delegation token identifier
*
* @param kind token kind
* @param owner the effective username of the token owner
* @param renewer the username of the renewer
* @param realUser the real username of the token owner
*/
public DelegationTokenIdentifier(Text kind, Text owner, Text renewer,
Text realUser) {
super(owner, renewer, realUser);
this.kind = kind;
}
/**
* Return the delegation token kind
* @return returns the delegation token kind
*/
@Override
public Text getKind() {
return kind;
}
}
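// Illustrative sketch (not part of the Hadoop sources): this identifier
// carries its kind as instance state, so one class can serve any configured
// token kind. The kind text and principals below are arbitrary examples.
import org.apache.hadoop.io.Text;

public class WebIdentifierDemo {
  public static void main(String[] args) {
    DelegationTokenIdentifier id = new DelegationTokenIdentifier(
        new Text("DEMO_TOKEN"), new Text("alice"),
        new Text("yarn"), new Text("alice"));
    System.out.println(id.getKind());  // prints the kind given at construction
  }
}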
| 2,117 | 31.584615 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/PseudoDelegationTokenAuthenticationHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
/**
 * An {@link AuthenticationHandler} that implements the Hadoop pseudo/simple
 * authentication mechanism for HTTP and supports Delegation Token
 * functionality.
 * <p/>
 * In addition to the {@link PseudoAuthenticationHandler} configuration
* properties, this handler supports:
* <ul>
* <li>simple.delegation-token.token-kind: the token kind for generated tokens
* (no default, required property).</li>
* <li>simple.delegation-token.update-interval.sec: secret manager master key
* update interval in seconds (default 1 day).</li>
* <li>simple.delegation-token.max-lifetime.sec: maximum life of a delegation
* token in seconds (default 7 days).</li>
* <li>simple.delegation-token.renewal-interval.sec: renewal interval for
* delegation tokens in seconds (default 1 day).</li>
* <li>simple.delegation-token.removal-scan-interval.sec: delegation tokens
* removal scan interval in seconds (default 1 hour).</li>
* </ul>
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class PseudoDelegationTokenAuthenticationHandler
extends DelegationTokenAuthenticationHandler {
public PseudoDelegationTokenAuthenticationHandler() {
super(new PseudoAuthenticationHandler(PseudoAuthenticationHandler.TYPE +
TYPE_POSTFIX));
}
}
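// Illustrative sketch (not part of the Hadoop sources): the hadoop-auth
// properties that select this handler and tune its delegation token support,
// using the property names listed in the javadoc above. The values are
// arbitrary examples.
import java.util.Properties;

public class PseudoHandlerConfigDemo {
  public static Properties demoProps() {
    Properties props = new Properties();
    // "simple" is rewritten to this handler's class name by
    // DelegationTokenAuthenticationFilter.setAuthHandlerClass().
    props.setProperty("type", "simple");
    props.setProperty("simple.delegation-token.token-kind", "DEMO_TOKEN");
    props.setProperty("simple.delegation-token.max-lifetime.sec",
        String.valueOf(7 * 24 * 60 * 60));  // 7 days, the documented default
    return props;
  }
}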
| 2,478 | 43.267857 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import com.google.common.annotations.VisibleForTesting;
import org.apache.curator.framework.CuratorFramework;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.authentication.util.ZKSignerSecretProvider;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecretManager;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.codehaus.jackson.map.ObjectMapper;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.Writer;
import java.nio.charset.Charset;
import java.security.Principal;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
* The <code>DelegationTokenAuthenticationFilter</code> filter is a
* {@link AuthenticationFilter} with Hadoop Delegation Token support.
* <p/>
 * By default it uses its own instance of the {@link
 * AbstractDelegationTokenSecretManager}. For situations where an external
 * <code>AbstractDelegationTokenSecretManager</code> is required (i.e. one that
 * shares the secret with <code>AbstractDelegationTokenSecretManager</code>
 * instances running in other services), the external
* <code>AbstractDelegationTokenSecretManager</code> must be set as an
* attribute in the {@link ServletContext} of the web application using the
* {@link #DELEGATION_TOKEN_SECRET_MANAGER_ATTR} attribute name (
* 'hadoop.http.delegation-token-secret-manager').
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DelegationTokenAuthenticationFilter
extends AuthenticationFilter {
private static final String APPLICATION_JSON_MIME = "application/json";
private static final String ERROR_EXCEPTION_JSON = "exception";
private static final String ERROR_MESSAGE_JSON = "message";
/**
* Sets an external <code>DelegationTokenSecretManager</code> instance to
* manage creation and verification of Delegation Tokens.
* <p/>
* This is useful for use cases where secrets must be shared across multiple
* services.
*/
public static final String DELEGATION_TOKEN_SECRET_MANAGER_ATTR =
"hadoop.http.delegation-token-secret-manager";
private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
private static final ThreadLocal<UserGroupInformation> UGI_TL =
new ThreadLocal<UserGroupInformation>();
public static final String PROXYUSER_PREFIX = "proxyuser";
private SaslRpcServer.AuthMethod handlerAuthMethod;
/**
* It delegates to
* {@link AuthenticationFilter#getConfiguration(String, FilterConfig)} and
* then overrides the {@link AuthenticationHandler} to use if authentication
* type is set to <code>simple</code> or <code>kerberos</code> in order to use
* the corresponding implementation with delegation token support.
*
   * @param configPrefix configuration prefix, passed through to the parent
   * filter.
   * @param filterConfig filter configuration, passed through to the parent
   * filter.
* @return hadoop-auth de-prefixed configuration for the filter and handler.
*/
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) throws ServletException {
Properties props = super.getConfiguration(configPrefix, filterConfig);
setAuthHandlerClass(props);
return props;
}
/**
* Set AUTH_TYPE property to the name of the corresponding authentication
* handler class based on the input properties.
* @param props input properties.
*/
protected void setAuthHandlerClass(Properties props)
throws ServletException {
String authType = props.getProperty(AUTH_TYPE);
if (authType == null) {
throw new ServletException("Config property "
+ AUTH_TYPE + " doesn't exist");
}
if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
props.setProperty(AUTH_TYPE,
PseudoDelegationTokenAuthenticationHandler.class.getName());
} else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
props.setProperty(AUTH_TYPE,
KerberosDelegationTokenAuthenticationHandler.class.getName());
}
}
/**
* Returns the proxyuser configuration. All returned properties must start
   * with <code>proxyuser.</code>
* <p/>
* Subclasses may override this method if the proxyuser configuration is
* read from other place than the filter init parameters.
*
* @param filterConfig filter configuration object
* @return the proxyuser configuration properties.
* @throws ServletException thrown if the configuration could not be created.
*/
protected Configuration getProxyuserConfiguration(FilterConfig filterConfig)
throws ServletException {
    // This filter class gets the configuration from the filter configs; we
    // create an empty configuration and inject the proxyuser settings into
    // it. In the initialization of the filter, the returned configuration is
    // passed to ProxyUsers, which only looks for 'proxyuser.' properties.
Configuration conf = new Configuration(false);
Enumeration<?> names = filterConfig.getInitParameterNames();
while (names.hasMoreElements()) {
String name = (String) names.nextElement();
if (name.startsWith(PROXYUSER_PREFIX + ".")) {
String value = filterConfig.getInitParameter(name);
conf.set(name, value);
}
}
return conf;
}
@Override
public void init(FilterConfig filterConfig) throws ServletException {
super.init(filterConfig);
AuthenticationHandler handler = getAuthenticationHandler();
AbstractDelegationTokenSecretManager dtSecretManager =
(AbstractDelegationTokenSecretManager) filterConfig.getServletContext().
getAttribute(DELEGATION_TOKEN_SECRET_MANAGER_ATTR);
if (dtSecretManager != null && handler
instanceof DelegationTokenAuthenticationHandler) {
DelegationTokenAuthenticationHandler dtHandler =
(DelegationTokenAuthenticationHandler) getAuthenticationHandler();
dtHandler.setExternalDelegationTokenSecretManager(dtSecretManager);
}
if (handler instanceof PseudoAuthenticationHandler ||
handler instanceof PseudoDelegationTokenAuthenticationHandler) {
setHandlerAuthMethod(SaslRpcServer.AuthMethod.SIMPLE);
}
if (handler instanceof KerberosAuthenticationHandler ||
handler instanceof KerberosDelegationTokenAuthenticationHandler) {
setHandlerAuthMethod(SaslRpcServer.AuthMethod.KERBEROS);
}
// proxyuser configuration
Configuration conf = getProxyuserConfiguration(filterConfig);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX);
}
@Override
protected void initializeAuthHandler(String authHandlerClassName,
FilterConfig filterConfig) throws ServletException {
// A single CuratorFramework should be used for a ZK cluster.
// If the ZKSignerSecretProvider has already created it, it has to
// be set here... to be used by the ZKDelegationTokenSecretManager
ZKDelegationTokenSecretManager.setCurator((CuratorFramework)
filterConfig.getServletContext().getAttribute(ZKSignerSecretProvider.
ZOOKEEPER_SIGNER_SECRET_PROVIDER_CURATOR_CLIENT_ATTRIBUTE));
super.initializeAuthHandler(authHandlerClassName, filterConfig);
ZKDelegationTokenSecretManager.setCurator(null);
}
protected void setHandlerAuthMethod(SaslRpcServer.AuthMethod authMethod) {
this.handlerAuthMethod = authMethod;
}
@VisibleForTesting
static String getDoAs(HttpServletRequest request) {
List<NameValuePair> list = URLEncodedUtils.parse(request.getQueryString(),
UTF8_CHARSET);
if (list != null) {
for (NameValuePair nv : list) {
if (DelegationTokenAuthenticatedURL.DO_AS.
equalsIgnoreCase(nv.getName())) {
return nv.getValue();
}
}
}
return null;
}
static UserGroupInformation getHttpUserGroupInformationInContext() {
return UGI_TL.get();
}
@Override
protected void doFilter(FilterChain filterChain, HttpServletRequest request,
HttpServletResponse response) throws IOException, ServletException {
boolean requestCompleted = false;
UserGroupInformation ugi = null;
AuthenticationToken authToken = (AuthenticationToken)
request.getUserPrincipal();
if (authToken != null && authToken != AuthenticationToken.ANONYMOUS) {
// if the request was authenticated because of a delegation token,
// then we ignore proxyuser (this is the same as the RPC behavior).
ugi = (UserGroupInformation) request.getAttribute(
DelegationTokenAuthenticationHandler.DELEGATION_TOKEN_UGI_ATTRIBUTE);
if (ugi == null) {
String realUser = request.getUserPrincipal().getName();
ugi = UserGroupInformation.createRemoteUser(realUser,
handlerAuthMethod);
String doAsUser = getDoAs(request);
if (doAsUser != null) {
ugi = UserGroupInformation.createProxyUser(doAsUser, ugi);
try {
ProxyUsers.authorize(ugi, request.getRemoteAddr());
} catch (AuthorizationException ex) {
HttpExceptionUtils.createServletExceptionResponse(response,
HttpServletResponse.SC_FORBIDDEN, ex);
requestCompleted = true;
}
}
}
UGI_TL.set(ugi);
}
if (!requestCompleted) {
final UserGroupInformation ugiF = ugi;
try {
request = new HttpServletRequestWrapper(request) {
@Override
public String getAuthType() {
return (ugiF != null) ? handlerAuthMethod.toString() : null;
}
@Override
public String getRemoteUser() {
return (ugiF != null) ? ugiF.getShortUserName() : null;
}
@Override
public Principal getUserPrincipal() {
return (ugiF != null) ? new Principal() {
@Override
public String getName() {
return ugiF.getUserName();
}
} : null;
}
};
super.doFilter(filterChain, request, response);
} finally {
UGI_TL.remove();
}
}
}
}
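// Illustrative sketch (not part of the Hadoop sources): sharing one secret
// manager across services, as the class javadoc describes. The manager is
// placed in the ServletContext under DELEGATION_TOKEN_SECRET_MANAGER_ATTR
// before the filter initializes; init() above then picks it up.
// DemoSecretManager is the subclass sketched earlier.
import javax.servlet.ServletContext;

public class ExternalSecretManagerWiring {
  public static void install(ServletContext ctx, DemoSecretManager mgr) {
    ctx.setAttribute(
        DelegationTokenAuthenticationFilter.DELEGATION_TOKEN_SECRET_MANAGER_ATTR,
        mgr);
  }
}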
| 12,209 | 39.83612 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/ServletUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.List;
/**
* Servlet utility methods.
*/
@InterfaceAudience.Private
class ServletUtils {
private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
/**
   * Extract a query string parameter without triggering HTTP parameter
   * processing by the servlet container.
   *
   * @param request the request.
   * @param name the name of the parameter to get the value of.
   * @return the parameter value, or <code>null</code> if the parameter is not
   * defined.
* @throws IOException thrown if there was an error parsing the query string.
*/
public static String getParameter(HttpServletRequest request, String name)
throws IOException {
List<NameValuePair> list = URLEncodedUtils.parse(request.getQueryString(),
UTF8_CHARSET);
if (list != null) {
for (NameValuePair nv : list) {
if (name.equals(nv.getName())) {
return nv.getValue();
}
}
}
return null;
}
}
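// Illustrative sketch (not part of the Hadoop sources, and assumed to live in
// this same package since ServletUtils is package-private): getParameter()
// parses the raw query string itself, so reading a parameter never forces the
// container to consume a POST body.
import java.io.IOException;
import javax.servlet.http.HttpServletRequest;

public class QueryParamDemo {
  // For "?op=GETDELEGATIONTOKEN&renewer=yarn" this returns "GETDELEGATIONTOKEN".
  static String operation(HttpServletRequest request) throws IOException {
    return ServletUtils.getParameter(request, "op");
  }
}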
| 2,078 | 33.65 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLEncoder;
import java.util.HashMap;
import java.util.Map;
/**
* The <code>DelegationTokenAuthenticatedURL</code> is a
* {@link AuthenticatedURL} sub-class with built-in Hadoop Delegation Token
* functionality.
* <p/>
* The authentication mechanisms supported by default are Hadoop Simple
* authentication (also known as pseudo authentication) and Kerberos SPNEGO
* authentication.
* <p/>
* Additional authentication mechanisms can be supported via {@link
* DelegationTokenAuthenticator} implementations.
* <p/>
* The default {@link DelegationTokenAuthenticator} is the {@link
* KerberosDelegationTokenAuthenticator} class which supports
* automatic fallback from Kerberos SPNEGO to Hadoop Simple authentication via
* the {@link PseudoDelegationTokenAuthenticator} class.
* <p/>
* <code>AuthenticatedURL</code> instances are not thread-safe.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
/**
* Constant used in URL's query string to perform a proxy user request, the
* value of the <code>DO_AS</code> parameter is the user the request will be
* done on behalf of.
*/
static final String DO_AS = "doAs";
/**
* Client side authentication token that handles Delegation Tokens.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public static class Token extends AuthenticatedURL.Token {
private
org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
delegationToken;
public org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
getDelegationToken() {
return delegationToken;
}
public void setDelegationToken(
org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier> delegationToken) {
this.delegationToken = delegationToken;
}
}
private static Class<? extends DelegationTokenAuthenticator>
DEFAULT_AUTHENTICATOR = KerberosDelegationTokenAuthenticator.class;
/**
* Sets the default {@link DelegationTokenAuthenticator} class to use when an
* {@link DelegationTokenAuthenticatedURL} instance is created without
* specifying one.
*
* The default class is {@link KerberosDelegationTokenAuthenticator}
*
* @param authenticator the authenticator class to use as default.
*/
public static void setDefaultDelegationTokenAuthenticator(
Class<? extends DelegationTokenAuthenticator> authenticator) {
DEFAULT_AUTHENTICATOR = authenticator;
}
/**
* Returns the default {@link DelegationTokenAuthenticator} class to use when
* an {@link DelegationTokenAuthenticatedURL} instance is created without
* specifying one.
* <p/>
* The default class is {@link KerberosDelegationTokenAuthenticator}
*
* @return the delegation token authenticator class to use as default.
*/
public static Class<? extends DelegationTokenAuthenticator>
getDefaultDelegationTokenAuthenticator() {
return DEFAULT_AUTHENTICATOR;
}
private static DelegationTokenAuthenticator
obtainDelegationTokenAuthenticator(DelegationTokenAuthenticator dta,
ConnectionConfigurator connConfigurator) {
try {
if (dta == null) {
dta = DEFAULT_AUTHENTICATOR.newInstance();
dta.setConnectionConfigurator(connConfigurator);
}
return dta;
} catch (Exception ex) {
throw new IllegalArgumentException(ex);
}
}
private boolean useQueryStringforDelegationToken = false;
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code>.
* <p/>
* An instance of the default {@link DelegationTokenAuthenticator} will be
* used.
*/
public DelegationTokenAuthenticatedURL() {
this(null, null);
}
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code>.
*
* @param authenticator the {@link DelegationTokenAuthenticator} instance to
* use, if <code>null</code> the default one will be used.
*/
public DelegationTokenAuthenticatedURL(
DelegationTokenAuthenticator authenticator) {
this(authenticator, null);
}
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code> using the default
* {@link DelegationTokenAuthenticator} class.
*
* @param connConfigurator a connection configurator.
*/
public DelegationTokenAuthenticatedURL(
ConnectionConfigurator connConfigurator) {
this(null, connConfigurator);
}
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code>.
*
* @param authenticator the {@link DelegationTokenAuthenticator} instance to
* use, if <code>null</code> the default one will be used.
* @param connConfigurator a connection configurator.
*/
public DelegationTokenAuthenticatedURL(
DelegationTokenAuthenticator authenticator,
ConnectionConfigurator connConfigurator) {
super(obtainDelegationTokenAuthenticator(authenticator, connConfigurator),
connConfigurator);
}
/**
* Sets if delegation token should be transmitted in the URL query string.
* By default it is transmitted using the
* {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header.
* <p/>
* This method is provided to enable WebHDFS backwards compatibility.
*
* @param useQueryString <code>TRUE</code> if the token is transmitted in the
* URL query string, <code>FALSE</code> if the delegation token is transmitted
* using the {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP
* header.
*/
@Deprecated
protected void setUseQueryStringForDelegationToken(boolean useQueryString) {
useQueryStringforDelegationToken = useQueryString;
}
/**
   * Returns whether the delegation token is transmitted in the URL query
   * string rather than as an HTTP header.
*
* @return <code>TRUE</code> if the token is transmitted in the URL query
* string, <code>FALSE</code> if the delegation token is transmitted using the
* {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header.
*/
public boolean useQueryStringForDelegationToken() {
return useQueryStringforDelegationToken;
}
/**
* Returns an authenticated {@link HttpURLConnection}, it uses a Delegation
* Token only if the given auth token is an instance of {@link Token} and
* it contains a Delegation Token, otherwise use the configured
* {@link DelegationTokenAuthenticator} to authenticate the connection.
*
* @param url the URL to connect to. Only HTTP/S URLs are supported.
* @param token the authentication token being used for the user.
* @return an authenticated {@link HttpURLConnection}.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
@Override
public HttpURLConnection openConnection(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
return (token instanceof Token) ? openConnection(url, (Token) token)
        : super.openConnection(url, token);
}
/**
* Returns an authenticated {@link HttpURLConnection}. If the Delegation
* Token is present, it will be used taking precedence over the configured
* <code>Authenticator</code>.
*
* @param url the URL to connect to. Only HTTP/S URLs are supported.
* @param token the authentication token being used for the user.
* @return an authenticated {@link HttpURLConnection}.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public HttpURLConnection openConnection(URL url, Token token)
throws IOException, AuthenticationException {
return openConnection(url, token, null);
}
private URL augmentURL(URL url, Map<String, String> params)
throws IOException {
if (params != null && params.size() > 0) {
String urlStr = url.toExternalForm();
StringBuilder sb = new StringBuilder(urlStr);
String separator = (urlStr.contains("?")) ? "&" : "?";
for (Map.Entry<String, String> param : params.entrySet()) {
sb.append(separator).append(param.getKey()).append("=").append(
param.getValue());
separator = "&";
}
url = new URL(sb.toString());
}
return url;
}
/**
* Returns an authenticated {@link HttpURLConnection}. If the Delegation
* Token is present, it will be used taking precedence over the configured
* <code>Authenticator</code>. If the <code>doAs</code> parameter is not NULL,
* the request will be done on behalf of the specified <code>doAs</code> user.
*
* @param url the URL to connect to. Only HTTP/S URLs are supported.
* @param token the authentication token being used for the user.
   * @param doAs user to do the request on behalf of; if NULL the request is
* as self.
* @return an authenticated {@link HttpURLConnection}.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
@SuppressWarnings("unchecked")
public HttpURLConnection openConnection(URL url, Token token, String doAs)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Map<String, String> extraParams = new HashMap<String, String>();
org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dToken
= null;
    // If we have a valid auth token, it takes precedence over a delegation
    // token and we don't even look for one.
if (!token.isSet()) {
// delegation token
Credentials creds = UserGroupInformation.getCurrentUser().
getCredentials();
if (!creds.getAllTokens().isEmpty()) {
InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
url.getPort());
Text service = SecurityUtil.buildTokenService(serviceAddr);
dToken = creds.getToken(service);
if (dToken != null) {
if (useQueryStringForDelegationToken()) {
// delegation token will go in the query string, injecting it
extraParams.put(
KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
dToken.encodeToUrlString());
} else {
// delegation token will go as request header, setting it in the
// auth-token to ensure no authentication handshake is triggered
// (if we have a delegation token, we are authenticated)
// the delegation token header is injected in the connection request
// at the end of this method.
token.delegationToken = (org.apache.hadoop.security.token.Token
<AbstractDelegationTokenIdentifier>) dToken;
}
}
}
}
// proxyuser
if (doAs != null) {
extraParams.put(DO_AS, URLEncoder.encode(doAs, "UTF-8"));
}
url = augmentURL(url, extraParams);
HttpURLConnection conn = super.openConnection(url, token);
if (!token.isSet() && !useQueryStringForDelegationToken() && dToken != null) {
// injecting the delegation token header in the connection request
conn.setRequestProperty(
DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER,
dToken.encodeToUrlString());
}
return conn;
}
/**
* Requests a delegation token using the configured <code>Authenticator</code>
* for authentication.
*
* @param url the URL to get the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token being used for the user where the
* Delegation token will be stored.
* @param renewer the renewer user.
* @return a delegation token.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
getDelegationToken(URL url, Token token, String renewer)
throws IOException, AuthenticationException {
return getDelegationToken(url, token, renewer, null);
}
/**
* Requests a delegation token using the configured <code>Authenticator</code>
* for authentication.
*
* @param url the URL to get the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token being used for the user where the
* Delegation token will be stored.
* @param renewer the renewer user.
* @param doAsUser the user to do as, which will be the token owner.
* @return a delegation token.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
getDelegationToken(URL url, Token token, String renewer, String doAsUser)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
try {
token.delegationToken =
((KerberosDelegationTokenAuthenticator) getAuthenticator()).
getDelegationToken(url, token, renewer, doAsUser);
return token.delegationToken;
} catch (IOException ex) {
token.delegationToken = null;
throw ex;
}
}
/**
* Renews a delegation token from the server end-point using the
* configured <code>Authenticator</code> for authentication.
*
* @param url the URL to renew the delegation token from. Only HTTP/S URLs are
* supported.
   * @param token the authentication token with the Delegation Token to renew.
   * @return the new expiration time of the delegation token.
   * @throws IOException if an IO error occurred.
   * @throws AuthenticationException if an authentication exception occurred.
*/
public long renewDelegationToken(URL url, Token token)
throws IOException, AuthenticationException {
return renewDelegationToken(url, token, null);
}
/**
* Renews a delegation token from the server end-point using the
* configured <code>Authenticator</code> for authentication.
*
* @param url the URL to renew the delegation token from. Only HTTP/S URLs are
* supported.
   * @param token the authentication token with the Delegation Token to renew.
   * @param doAsUser the user to do as, which will be the token owner.
   * @return the new expiration time of the delegation token.
   * @throws IOException if an IO error occurred.
   * @throws AuthenticationException if an authentication exception occurred.
*/
public long renewDelegationToken(URL url, Token token, String doAsUser)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Preconditions.checkNotNull(token.delegationToken,
"No delegation token available");
try {
return ((KerberosDelegationTokenAuthenticator) getAuthenticator()).
renewDelegationToken(url, token, token.delegationToken, doAsUser);
} catch (IOException ex) {
token.delegationToken = null;
throw ex;
}
}
/**
* Cancels a delegation token from the server end-point. It does not require
* being authenticated by the configured <code>Authenticator</code>.
*
* @param url the URL to cancel the delegation token from. Only HTTP/S URLs
* are supported.
* @param token the authentication token with the Delegation Token to cancel.
* @throws IOException if an IO error occurred.
*/
public void cancelDelegationToken(URL url, Token token)
throws IOException {
cancelDelegationToken(url, token, null);
}
/**
* Cancels a delegation token from the server end-point. It does not require
* being authenticated by the configured <code>Authenticator</code>.
*
* @param url the URL to cancel the delegation token from. Only HTTP/S URLs
* are supported.
* @param token the authentication token with the Delegation Token to cancel.
* @param doAsUser the user to do as, which will be the token owner.
* @throws IOException if an IO error occurred.
*/
public void cancelDelegationToken(URL url, Token token, String doAsUser)
throws IOException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Preconditions.checkNotNull(token.delegationToken,
"No delegation token available");
try {
((KerberosDelegationTokenAuthenticator) getAuthenticator()).
cancelDelegationToken(url, token, token.delegationToken, doAsUser);
} finally {
token.delegationToken = null;
}
}
}
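// Illustrative usage sketch, not part of the original file. Assumptions: a
// service protected by a delegation-token-aware filter runs at the URL below,
// and "yarn" is an acceptable renewer; both are hypothetical.
class DelegationTokenAuthenticatedURLExample {
  public static void main(String[] args) throws Exception {
    DelegationTokenAuthenticatedURL.Token token =
        new DelegationTokenAuthenticatedURL.Token();
    DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();
    URL url = new URL("http://host.example.com:14000/service");
    // One authenticated round-trip (SPNEGO, falling back to simple auth)
    // stores the delegation token inside the token object.
    aUrl.getDelegationToken(url, token, "yarn");
    // Later connections reuse the stored token via the
    // X-Hadoop-Delegation-Token header instead of re-authenticating.
    HttpURLConnection conn = aUrl.openConnection(url, token);
    System.out.println("HTTP " + conn.getResponseCode());
    // Invalidate the token once it is no longer needed.
    aUrl.cancelDelegationToken(url, token);
  }
}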
| 18,315 | 38.73102 | 100 | java |
| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/KerberosDelegationTokenAuthenticator.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
/**
* The <code>KerberosDelegationTokenAuthenticator</code> provides support for
* Kerberos SPNEGO authentication mechanism and support for Hadoop Delegation
* Token operations.
* <p/>
 * It falls back to the {@link PseudoDelegationTokenAuthenticator} if the HTTP
 * endpoint does not trigger SPNEGO authentication.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class KerberosDelegationTokenAuthenticator
extends DelegationTokenAuthenticator {
public KerberosDelegationTokenAuthenticator() {
super(new KerberosAuthenticator() {
@Override
protected Authenticator getFallBackAuthenticator() {
return new PseudoDelegationTokenAuthenticator();
}
});
}
}
| 1,853 | 38.446809 | 78 | java |
| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/PseudoDelegationTokenAuthenticator.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
import java.io.IOException;
/**
 * The <code>PseudoDelegationTokenAuthenticator</code> provides support for
 * Hadoop's pseudo authentication mechanism, which accepts the user name
 * specified as a query-string parameter, and for Hadoop Delegation Token
 * operations.
* <p/>
* This mimics the model of Hadoop Simple authentication trusting the
* {@link UserGroupInformation#getCurrentUser()} value.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class PseudoDelegationTokenAuthenticator
extends DelegationTokenAuthenticator {
public PseudoDelegationTokenAuthenticator() {
super(new PseudoAuthenticator() {
@Override
protected String getUserName() {
try {
return UserGroupInformation.getCurrentUser().getShortUserName();
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
});
}
}
| 2,019 | 35.727273 | 77 | java |
| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/KerberosDelegationTokenAuthenticationHandler.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
/**
* An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
* for HTTP and supports Delegation Token functionality.
* <p/>
* In addition to the {@link KerberosAuthenticationHandler} configuration
* properties, this handler supports:
* <ul>
* <li>kerberos.delegation-token.token-kind: the token kind for generated tokens
* (no default, required property).</li>
* <li>kerberos.delegation-token.update-interval.sec: secret manager master key
* update interval in seconds (default 1 day).</li>
* <li>kerberos.delegation-token.max-lifetime.sec: maximum life of a delegation
* token in seconds (default 7 days).</li>
 * <li>kerberos.delegation-token.renew-interval.sec: renewal interval for
* delegation tokens in seconds (default 1 day).</li>
* <li>kerberos.delegation-token.removal-scan-interval.sec: delegation tokens
* removal scan interval in seconds (default 1 hour).</li>
* </ul>
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class KerberosDelegationTokenAuthenticationHandler
extends DelegationTokenAuthenticationHandler {
public KerberosDelegationTokenAuthenticationHandler() {
super(new KerberosAuthenticationHandler(KerberosAuthenticationHandler.TYPE +
TYPE_POSTFIX));
}
}
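// Illustrative configuration sketch, not part of the original file. All
// property values (principal, keytab, token kind) are hypothetical; the
// token-kind key follows DelegationTokenAuthenticationHandler.TOKEN_KIND as
// read by initTokenManager when init is called directly.
class KerberosDelegationTokenAuthenticationHandlerExample {
  static KerberosDelegationTokenAuthenticationHandler create() throws Exception {
    java.util.Properties props = new java.util.Properties();
    props.setProperty("kerberos.principal", "HTTP/host.example.com@EXAMPLE.COM");
    props.setProperty("kerberos.keytab", "/etc/security/keytabs/spnego.keytab");
    props.setProperty("delegation-token.token-kind", "EXAMPLE_TOKEN");
    KerberosDelegationTokenAuthenticationHandler handler =
        new KerberosDelegationTokenAuthenticationHandler();
    handler.init(props);  // starts the delegation token secret manager
    return handler;
  }
}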
| 2,411 | 42.854545 | 86 | java |
| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/HttpUserGroupInformation.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.UserGroupInformation;
import javax.servlet.http.HttpServletRequest;
/**
* Util class that returns the remote {@link UserGroupInformation} in scope
* for the HTTP request.
*/
@InterfaceAudience.Private
public class HttpUserGroupInformation {
/**
* Returns the remote {@link UserGroupInformation} in context for the current
* HTTP request, taking into account proxy user requests.
*
* @return the remote {@link UserGroupInformation}, <code>NULL</code> if none.
*/
public static UserGroupInformation get() {
return DelegationTokenAuthenticationFilter.
getHttpUserGroupInformationInContext();
}
}
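// Illustrative sketch, not part of the original file: a hypothetical servlet
// deployed behind a DelegationTokenAuthenticationFilter. The remote (possibly
// proxied) caller is available without touching the request object directly.
class HttpUserGroupInformationExample extends javax.servlet.http.HttpServlet {
  @Override
  protected void doGet(HttpServletRequest req,
      javax.servlet.http.HttpServletResponse resp) throws java.io.IOException {
    UserGroupInformation ugi = HttpUserGroupInformation.get();
    resp.getWriter().write("caller: "
        + ((ugi != null) ? ugi.getShortUserName() : "anonymous"));
  }
}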
| 1,591 | 35.181818 | 80 | java |
| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.apache.hadoop.util.StringUtils;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLEncoder;
import java.util.HashMap;
import java.util.Map;
/**
* {@link Authenticator} wrapper that enhances an {@link Authenticator} with
* Delegation Token support.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class DelegationTokenAuthenticator implements Authenticator {
private static Logger LOG =
LoggerFactory.getLogger(DelegationTokenAuthenticator.class);
private static final String CONTENT_TYPE = "Content-Type";
private static final String APPLICATION_JSON_MIME = "application/json";
private static final String HTTP_GET = "GET";
private static final String HTTP_PUT = "PUT";
public static final String OP_PARAM = "op";
public static final String DELEGATION_TOKEN_HEADER =
"X-Hadoop-Delegation-Token";
public static final String DELEGATION_PARAM = "delegation";
public static final String TOKEN_PARAM = "token";
public static final String RENEWER_PARAM = "renewer";
public static final String DELEGATION_TOKEN_JSON = "Token";
public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
/**
* DelegationToken operations.
*/
@InterfaceAudience.Private
public static enum DelegationTokenOperation {
GETDELEGATIONTOKEN(HTTP_GET, true),
RENEWDELEGATIONTOKEN(HTTP_PUT, true),
CANCELDELEGATIONTOKEN(HTTP_PUT, false);
private String httpMethod;
private boolean requiresKerberosCredentials;
private DelegationTokenOperation(String httpMethod,
boolean requiresKerberosCredentials) {
this.httpMethod = httpMethod;
this.requiresKerberosCredentials = requiresKerberosCredentials;
}
public String getHttpMethod() {
return httpMethod;
}
public boolean requiresKerberosCredentials() {
return requiresKerberosCredentials;
}
}
private Authenticator authenticator;
private ConnectionConfigurator connConfigurator;
public DelegationTokenAuthenticator(Authenticator authenticator) {
this.authenticator = authenticator;
}
@Override
public void setConnectionConfigurator(ConnectionConfigurator configurator) {
authenticator.setConnectionConfigurator(configurator);
connConfigurator = configurator;
}
private boolean hasDelegationToken(URL url, AuthenticatedURL.Token token) {
boolean hasDt = false;
if (token instanceof DelegationTokenAuthenticatedURL.Token) {
hasDt = ((DelegationTokenAuthenticatedURL.Token) token).
getDelegationToken() != null;
}
if (!hasDt) {
String queryStr = url.getQuery();
hasDt = (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
}
return hasDt;
}
@Override
public void authenticate(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
if (!hasDelegationToken(url, token)) {
authenticator.authenticate(url, token);
}
}
/**
* Requests a delegation token using the configured <code>Authenticator</code>
* for authentication.
*
* @param url the URL to get the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token being used for the user where the
* Delegation token will be stored.
* @param renewer the renewer user.
   * @return a delegation token.
   * @throws IOException if an IO error occurred.
   * @throws AuthenticationException if an authentication exception occurred.
*/
public Token<AbstractDelegationTokenIdentifier> getDelegationToken(URL url,
AuthenticatedURL.Token token, String renewer)
throws IOException, AuthenticationException {
return getDelegationToken(url, token, renewer, null);
}
/**
* Requests a delegation token using the configured <code>Authenticator</code>
* for authentication.
*
* @param url the URL to get the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token being used for the user where the
* Delegation token will be stored.
* @param renewer the renewer user.
* @param doAsUser the user to do as, which will be the token owner.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public Token<AbstractDelegationTokenIdentifier> getDelegationToken(URL url,
AuthenticatedURL.Token token, String renewer, String doAsUser)
throws IOException, AuthenticationException {
Map json = doDelegationTokenOperation(url, token,
DelegationTokenOperation.GETDELEGATIONTOKEN, renewer, null, true,
doAsUser);
json = (Map) json.get(DELEGATION_TOKEN_JSON);
String tokenStr = (String) json.get(DELEGATION_TOKEN_URL_STRING_JSON);
Token<AbstractDelegationTokenIdentifier> dToken =
new Token<AbstractDelegationTokenIdentifier>();
dToken.decodeFromUrlString(tokenStr);
InetSocketAddress service = new InetSocketAddress(url.getHost(),
url.getPort());
SecurityUtil.setTokenService(dToken, service);
return dToken;
}
/**
* Renews a delegation token from the server end-point using the
* configured <code>Authenticator</code> for authentication.
*
* @param url the URL to renew the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token with the Delegation Token to renew.
   * @return the new expiration time of the delegation token.
   * @throws IOException if an IO error occurred.
   * @throws AuthenticationException if an authentication exception occurred.
*/
public long renewDelegationToken(URL url,
AuthenticatedURL.Token token,
Token<AbstractDelegationTokenIdentifier> dToken)
throws IOException, AuthenticationException {
return renewDelegationToken(url, token, dToken, null);
}
/**
* Renews a delegation token from the server end-point using the
* configured <code>Authenticator</code> for authentication.
*
* @param url the URL to renew the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token with the Delegation Token to renew.
* @param doAsUser the user to do as, which will be the token owner.
   * @return the new expiration time of the delegation token.
   * @throws IOException if an IO error occurred.
   * @throws AuthenticationException if an authentication exception occurred.
*/
public long renewDelegationToken(URL url,
AuthenticatedURL.Token token,
Token<AbstractDelegationTokenIdentifier> dToken, String doAsUser)
throws IOException, AuthenticationException {
Map json = doDelegationTokenOperation(url, token,
DelegationTokenOperation.RENEWDELEGATIONTOKEN, null, dToken, true,
doAsUser);
return (Long) json.get(RENEW_DELEGATION_TOKEN_JSON);
}
/**
* Cancels a delegation token from the server end-point. It does not require
* being authenticated by the configured <code>Authenticator</code>.
*
* @param url the URL to cancel the delegation token from. Only HTTP/S URLs
* are supported.
* @param token the authentication token with the Delegation Token to cancel.
* @throws IOException if an IO error occurred.
*/
public void cancelDelegationToken(URL url,
AuthenticatedURL.Token token,
Token<AbstractDelegationTokenIdentifier> dToken)
throws IOException {
cancelDelegationToken(url, token, dToken, null);
}
/**
* Cancels a delegation token from the server end-point. It does not require
* being authenticated by the configured <code>Authenticator</code>.
*
* @param url the URL to cancel the delegation token from. Only HTTP/S URLs
* are supported.
* @param token the authentication token with the Delegation Token to cancel.
* @param doAsUser the user to do as, which will be the token owner.
* @throws IOException if an IO error occurred.
*/
public void cancelDelegationToken(URL url,
AuthenticatedURL.Token token,
Token<AbstractDelegationTokenIdentifier> dToken, String doAsUser)
throws IOException {
try {
doDelegationTokenOperation(url, token,
DelegationTokenOperation.CANCELDELEGATIONTOKEN, null, dToken, false,
doAsUser);
} catch (AuthenticationException ex) {
throw new IOException("This should not happen: " + ex.getMessage(), ex);
}
}
private Map doDelegationTokenOperation(URL url,
AuthenticatedURL.Token token, DelegationTokenOperation operation,
String renewer, Token<?> dToken, boolean hasResponse, String doAsUser)
throws IOException, AuthenticationException {
Map ret = null;
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, operation.toString());
if (renewer != null) {
params.put(RENEWER_PARAM, renewer);
}
if (dToken != null) {
params.put(TOKEN_PARAM, dToken.encodeToUrlString());
}
// proxyuser
if (doAsUser != null) {
params.put(DelegationTokenAuthenticatedURL.DO_AS,
URLEncoder.encode(doAsUser, "UTF-8"));
}
String urlStr = url.toExternalForm();
StringBuilder sb = new StringBuilder(urlStr);
String separator = (urlStr.contains("?")) ? "&" : "?";
for (Map.Entry<String, String> entry : params.entrySet()) {
sb.append(separator).append(entry.getKey()).append("=").
          append(URLEncoder.encode(entry.getValue(), "UTF-8"));
separator = "&";
}
url = new URL(sb.toString());
AuthenticatedURL aUrl = new AuthenticatedURL(this, connConfigurator);
HttpURLConnection conn = aUrl.openConnection(url, token);
conn.setRequestMethod(operation.getHttpMethod());
HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
if (hasResponse) {
String contentType = conn.getHeaderField(CONTENT_TYPE);
contentType = (contentType != null) ? StringUtils.toLowerCase(contentType)
: null;
if (contentType != null &&
contentType.contains(APPLICATION_JSON_MIME)) {
try {
ObjectMapper mapper = new ObjectMapper();
ret = mapper.readValue(conn.getInputStream(), Map.class);
} catch (Exception ex) {
throw new AuthenticationException(String.format(
"'%s' did not handle the '%s' delegation token operation: %s",
url.getAuthority(), operation, ex.getMessage()), ex);
}
} else {
throw new AuthenticationException(String.format("'%s' did not " +
"respond with JSON to the '%s' delegation token operation",
url.getAuthority(), operation));
}
}
return ret;
}
}
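// Illustrative sketch, not part of the original file: direct use of the
// authenticator for the three management operations. The endpoint and renewer
// are hypothetical; each call sends op=GETDELEGATIONTOKEN, RENEWDELEGATIONTOKEN
// or CANCELDELEGATIONTOKEN and parses the JSON documented above.
class DelegationTokenAuthenticatorExample {
  public static void main(String[] args) throws Exception {
    DelegationTokenAuthenticator auth = new KerberosDelegationTokenAuthenticator();
    AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
    URL url = new URL("http://host.example.com:14000/service");
    Token<AbstractDelegationTokenIdentifier> dt =
        auth.getDelegationToken(url, authToken, "yarn");
    long newExpiration = auth.renewDelegationToken(url, authToken, dt);
    System.out.println("token renewed until " + newExpiration);
    auth.cancelDelegationToken(url, authToken, dt);
  }
}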
| 12,350 | 38.586538 | 85 | java |
| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecretManager;
import com.google.common.annotations.VisibleForTesting;
/**
* Delegation Token Manager used by the
* {@link KerberosDelegationTokenAuthenticationHandler}.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DelegationTokenManager {
public static final String ENABLE_ZK_KEY = "zk-dt-secret-manager.enable";
public static final String PREFIX = "delegation-token.";
public static final String UPDATE_INTERVAL = PREFIX + "update-interval.sec";
public static final long UPDATE_INTERVAL_DEFAULT = 24 * 60 * 60;
public static final String MAX_LIFETIME = PREFIX + "max-lifetime.sec";
public static final long MAX_LIFETIME_DEFAULT = 7 * 24 * 60 * 60;
public static final String RENEW_INTERVAL = PREFIX + "renew-interval.sec";
public static final long RENEW_INTERVAL_DEFAULT = 24 * 60 * 60;
public static final String REMOVAL_SCAN_INTERVAL = PREFIX +
"removal-scan-interval.sec";
public static final long REMOVAL_SCAN_INTERVAL_DEFAULT = 60 * 60;
private static class DelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
private Text tokenKind;
public DelegationTokenSecretManager(Configuration conf, Text tokenKind) {
super(conf.getLong(UPDATE_INTERVAL, UPDATE_INTERVAL_DEFAULT) * 1000,
conf.getLong(MAX_LIFETIME, MAX_LIFETIME_DEFAULT) * 1000,
conf.getLong(RENEW_INTERVAL, RENEW_INTERVAL_DEFAULT) * 1000,
          conf.getLong(REMOVAL_SCAN_INTERVAL,
              REMOVAL_SCAN_INTERVAL_DEFAULT) * 1000);
this.tokenKind = tokenKind;
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier(tokenKind);
}
@Override
public DelegationTokenIdentifier decodeTokenIdentifier(
Token<DelegationTokenIdentifier> token) throws IOException {
return DelegationTokenManager.decodeToken(token, tokenKind);
}
}
private static class ZKSecretManager
extends ZKDelegationTokenSecretManager<DelegationTokenIdentifier> {
private Text tokenKind;
public ZKSecretManager(Configuration conf, Text tokenKind) {
super(conf);
this.tokenKind = tokenKind;
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier(tokenKind);
}
@Override
public DelegationTokenIdentifier decodeTokenIdentifier(
Token<DelegationTokenIdentifier> token) throws IOException {
return DelegationTokenManager.decodeToken(token, tokenKind);
}
}
private AbstractDelegationTokenSecretManager secretManager = null;
private boolean managedSecretManager;
public DelegationTokenManager(Configuration conf, Text tokenKind) {
if (conf.getBoolean(ENABLE_ZK_KEY, false)) {
this.secretManager = new ZKSecretManager(conf, tokenKind);
} else {
this.secretManager = new DelegationTokenSecretManager(conf, tokenKind);
}
managedSecretManager = true;
}
/**
* Sets an external <code>DelegationTokenSecretManager</code> instance to
* manage creation and verification of Delegation Tokens.
* <p/>
* This is useful for use cases where secrets must be shared across multiple
* services.
*
* @param secretManager a <code>DelegationTokenSecretManager</code> instance
*/
public void setExternalDelegationTokenSecretManager(
AbstractDelegationTokenSecretManager secretManager) {
this.secretManager.stopThreads();
this.secretManager = secretManager;
managedSecretManager = false;
}
public void init() {
if (managedSecretManager) {
try {
secretManager.startThreads();
} catch (IOException ex) {
throw new RuntimeException("Could not start " +
secretManager.getClass() + ": " + ex.toString(), ex);
}
}
}
public void destroy() {
if (managedSecretManager) {
secretManager.stopThreads();
}
}
@SuppressWarnings("unchecked")
public Token<? extends AbstractDelegationTokenIdentifier> createToken(
UserGroupInformation ugi, String renewer) {
renewer = (renewer == null) ? ugi.getShortUserName() : renewer;
String user = ugi.getUserName();
Text owner = new Text(user);
Text realUser = null;
if (ugi.getRealUser() != null) {
realUser = new Text(ugi.getRealUser().getUserName());
}
AbstractDelegationTokenIdentifier tokenIdentifier =
(AbstractDelegationTokenIdentifier) secretManager.createIdentifier();
tokenIdentifier.setOwner(owner);
tokenIdentifier.setRenewer(new Text(renewer));
tokenIdentifier.setRealUser(realUser);
return new Token(tokenIdentifier, secretManager);
}
@SuppressWarnings("unchecked")
public long renewToken(
Token<? extends AbstractDelegationTokenIdentifier> token, String renewer)
throws IOException {
return secretManager.renewToken(token, renewer);
}
@SuppressWarnings("unchecked")
public void cancelToken(
Token<? extends AbstractDelegationTokenIdentifier> token,
String canceler) throws IOException {
canceler = (canceler != null) ? canceler :
verifyToken(token).getShortUserName();
secretManager.cancelToken(token, canceler);
}
@SuppressWarnings("unchecked")
public UserGroupInformation verifyToken(
Token<? extends AbstractDelegationTokenIdentifier> token)
throws IOException {
AbstractDelegationTokenIdentifier id = secretManager.decodeTokenIdentifier(token);
secretManager.verifyToken(id, token.getPassword());
return id.getUser();
}
@VisibleForTesting
@SuppressWarnings("rawtypes")
public AbstractDelegationTokenSecretManager getDelegationTokenSecretManager() {
return secretManager;
}
private static DelegationTokenIdentifier decodeToken(
Token<DelegationTokenIdentifier> token, Text tokenKind)
throws IOException {
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream dis = new DataInputStream(buf);
DelegationTokenIdentifier id = new DelegationTokenIdentifier(tokenKind);
id.readFields(dis);
dis.close();
return id;
}
}
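// Illustrative sketch, not part of the original file: a standalone manager
// round-trip. The token kind and renewer are hypothetical; with ENABLE_ZK_KEY
// unset this uses the in-memory secret manager.
class DelegationTokenManagerExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration(false);
    DelegationTokenManager mgr =
        new DelegationTokenManager(conf, new Text("EXAMPLE_TOKEN"));
    mgr.init();  // starts the secret manager threads
    Token<? extends AbstractDelegationTokenIdentifier> token =
        mgr.createToken(UserGroupInformation.getCurrentUser(), "yarn");
    // verifyToken throws if the password or identifier do not check out.
    UserGroupInformation owner = mgr.verifyToken(token);
    System.out.println("token owner: " + owner.getShortUserName());
    mgr.cancelToken(token, owner.getShortUserName());
    mgr.destroy();
  }
}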
| 7,699 | 34.648148 | 88 | java |
| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import java.io.IOException;
import java.io.Writer;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.apache.hadoop.util.StringUtils;
import org.codehaus.jackson.map.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
/**
* An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
* for HTTP and supports Delegation Token functionality.
* <p/>
* In addition to the wrapped {@link AuthenticationHandler} configuration
* properties, this handler supports the following properties prefixed
* with the type of the wrapped <code>AuthenticationHandler</code>:
* <ul>
* <li>delegation-token.token-kind: the token kind for generated tokens
* (no default, required property).</li>
* <li>delegation-token.update-interval.sec: secret manager master key
* update interval in seconds (default 1 day).</li>
* <li>delegation-token.max-lifetime.sec: maximum life of a delegation
* token in seconds (default 7 days).</li>
 * <li>delegation-token.renew-interval.sec: renewal interval for
* delegation tokens in seconds (default 1 day).</li>
* <li>delegation-token.removal-scan-interval.sec: delegation tokens
* removal scan interval in seconds (default 1 hour).</li>
* </ul>
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class DelegationTokenAuthenticationHandler
implements AuthenticationHandler {
protected static final String TYPE_POSTFIX = "-dt";
public static final String PREFIX = "delegation-token.";
public static final String TOKEN_KIND = PREFIX + "token-kind";
private static final Set<String> DELEGATION_TOKEN_OPS = new HashSet<String>();
public static final String DELEGATION_TOKEN_UGI_ATTRIBUTE =
"hadoop.security.delegation-token.ugi";
static {
DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
}
private AuthenticationHandler authHandler;
private DelegationTokenManager tokenManager;
private String authType;
public DelegationTokenAuthenticationHandler(AuthenticationHandler handler) {
authHandler = handler;
authType = handler.getType();
}
@VisibleForTesting
DelegationTokenManager getTokenManager() {
return tokenManager;
}
@Override
public void init(Properties config) throws ServletException {
authHandler.init(config);
initTokenManager(config);
}
/**
* Sets an external <code>DelegationTokenSecretManager</code> instance to
* manage creation and verification of Delegation Tokens.
* <p/>
* This is useful for use cases where secrets must be shared across multiple
* services.
*
* @param secretManager a <code>DelegationTokenSecretManager</code> instance
*/
public void setExternalDelegationTokenSecretManager(
AbstractDelegationTokenSecretManager secretManager) {
tokenManager.setExternalDelegationTokenSecretManager(secretManager);
}
@VisibleForTesting
@SuppressWarnings("unchecked")
public void initTokenManager(Properties config) {
Configuration conf = new Configuration(false);
for (Map.Entry entry : config.entrySet()) {
conf.set((String) entry.getKey(), (String) entry.getValue());
}
String tokenKind = conf.get(TOKEN_KIND);
if (tokenKind == null) {
throw new IllegalArgumentException(
"The configuration does not define the token kind");
}
tokenKind = tokenKind.trim();
tokenManager = new DelegationTokenManager(conf, new Text(tokenKind));
tokenManager.init();
}
@Override
public void destroy() {
tokenManager.destroy();
authHandler.destroy();
}
@Override
public String getType() {
return authType;
}
private static final String ENTER = System.getProperty("line.separator");
@Override
@SuppressWarnings("unchecked")
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
boolean requestContinues = true;
String op = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.OP_PARAM);
op = (op != null) ? StringUtils.toUpperCase(op) : null;
if (DELEGATION_TOKEN_OPS.contains(op) &&
!request.getMethod().equals("OPTIONS")) {
KerberosDelegationTokenAuthenticator.DelegationTokenOperation dtOp =
KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.valueOf(op);
if (dtOp.getHttpMethod().equals(request.getMethod())) {
boolean doManagement;
if (dtOp.requiresKerberosCredentials() && token == null) {
token = authenticate(request, response);
if (token == null) {
requestContinues = false;
doManagement = false;
} else {
doManagement = true;
}
} else {
doManagement = true;
}
if (doManagement) {
UserGroupInformation requestUgi = (token != null)
? UserGroupInformation.createRemoteUser(token.getUserName())
: null;
// Create the proxy user if doAsUser exists
String doAsUser = DelegationTokenAuthenticationFilter.getDoAs(request);
if (requestUgi != null && doAsUser != null) {
requestUgi = UserGroupInformation.createProxyUser(
doAsUser, requestUgi);
try {
ProxyUsers.authorize(requestUgi, request.getRemoteAddr());
} catch (AuthorizationException ex) {
HttpExceptionUtils.createServletExceptionResponse(response,
HttpServletResponse.SC_FORBIDDEN, ex);
return false;
}
}
Map map = null;
switch (dtOp) {
case GETDELEGATIONTOKEN:
if (requestUgi == null) {
throw new IllegalStateException("request UGI cannot be NULL");
}
String renewer = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.RENEWER_PARAM);
try {
Token<?> dToken = tokenManager.createToken(requestUgi, renewer);
map = delegationTokenToJSON(dToken);
} catch (IOException ex) {
throw new AuthenticationException(ex.toString(), ex);
}
break;
case RENEWDELEGATIONTOKEN:
if (requestUgi == null) {
throw new IllegalStateException("request UGI cannot be NULL");
}
String tokenToRenew = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
if (tokenToRenew == null) {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Operation [{0}] requires the parameter [{1}]", dtOp,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM)
);
requestContinues = false;
} else {
Token<AbstractDelegationTokenIdentifier> dt = new Token();
try {
dt.decodeFromUrlString(tokenToRenew);
long expirationTime = tokenManager.renewToken(dt,
requestUgi.getShortUserName());
map = new HashMap();
map.put("long", expirationTime);
} catch (IOException ex) {
throw new AuthenticationException(ex.toString(), ex);
}
}
break;
case CANCELDELEGATIONTOKEN:
String tokenToCancel = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
if (tokenToCancel == null) {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Operation [{0}] requires the parameter [{1}]", dtOp,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM)
);
requestContinues = false;
} else {
Token<AbstractDelegationTokenIdentifier> dt = new Token();
try {
dt.decodeFromUrlString(tokenToCancel);
tokenManager.cancelToken(dt, (requestUgi != null)
? requestUgi.getShortUserName() : null);
} catch (IOException ex) {
response.sendError(HttpServletResponse.SC_NOT_FOUND,
"Invalid delegation token, cannot cancel");
requestContinues = false;
}
}
break;
}
if (requestContinues) {
response.setStatus(HttpServletResponse.SC_OK);
if (map != null) {
response.setContentType(MediaType.APPLICATION_JSON);
Writer writer = response.getWriter();
ObjectMapper jsonMapper = new ObjectMapper();
jsonMapper.writeValue(writer, map);
writer.write(ENTER);
writer.flush();
}
requestContinues = false;
}
}
} else {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Wrong HTTP method [{0}] for operation [{1}], it should be " +
"[{2}]", request.getMethod(), dtOp, dtOp.getHttpMethod()));
requestContinues = false;
}
}
return requestContinues;
}
@SuppressWarnings("unchecked")
private static Map delegationTokenToJSON(Token token) throws IOException {
Map json = new LinkedHashMap();
json.put(
KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
token.encodeToUrlString());
Map response = new LinkedHashMap();
response.put(KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_JSON,
json);
return response;
}
/**
* Authenticates a request looking for the <code>delegation</code>
   * query-string parameter and verifying it is a valid token. If there is no
   * <code>delegation</code> query-string parameter, it delegates
   * authentication to the wrapped {@link AuthenticationHandler}.
*
* @param request the HTTP client request.
* @param response the HTTP client response.
* @return the authentication token for the authenticated request.
* @throws IOException thrown if an IO error occurred.
* @throws AuthenticationException thrown if the authentication failed.
*/
@SuppressWarnings("unchecked")
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token;
String delegationParam = getDelegationToken(request);
if (delegationParam != null) {
try {
Token<AbstractDelegationTokenIdentifier> dt = new Token();
dt.decodeFromUrlString(delegationParam);
UserGroupInformation ugi = tokenManager.verifyToken(dt);
final String shortName = ugi.getShortUserName();
        // create an ephemeral token
token = new AuthenticationToken(shortName, ugi.getUserName(),
getType());
token.setExpires(0);
request.setAttribute(DELEGATION_TOKEN_UGI_ATTRIBUTE, ugi);
} catch (Throwable ex) {
token = null;
HttpExceptionUtils.createServletExceptionResponse(response,
HttpServletResponse.SC_FORBIDDEN, new AuthenticationException(ex));
}
} else {
token = authHandler.authenticate(request, response);
}
return token;
}
private String getDelegationToken(HttpServletRequest request)
throws IOException {
String dToken = request.getHeader(
DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER);
if (dToken == null) {
dToken = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
}
return dToken;
}
}
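// Illustrative sketch, not part of the original file: sharing one secret
// manager across two handlers so delegation tokens issued by either service
// verify on both. The handler instances and secret manager wiring are
// hypothetical; see setExternalDelegationTokenSecretManager above.
class SharedSecretManagerExample {
  static void share(DelegationTokenAuthenticationHandler handlerA,
      DelegationTokenAuthenticationHandler handlerB,
      AbstractDelegationTokenSecretManager sharedSecrets) {
    // Each handler stops its own secret manager and issues/verifies
    // delegation tokens against the shared one from now on.
    handlerA.setExternalDelegationTokenSecretManager(sharedSecrets);
    handlerB.setExternalDelegationTokenSecretManager(sharedSecrets);
  }
}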
| 14,568 | 38.915068 | 88 | java |
| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLHostnameVerifier.java |
/*
* $HeadURL$
* $Revision$
* $Date$
*
* ====================================================================
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
*/
package org.apache.hadoop.security.ssl;
import java.io.IOException;
import java.io.InputStream;
import java.security.cert.Certificate;
import java.security.cert.CertificateParsingException;
import java.security.cert.X509Certificate;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.TreeSet;
import javax.net.ssl.SSLException;
import javax.net.ssl.SSLPeerUnverifiedException;
import javax.net.ssl.SSLSession;
import javax.net.ssl.SSLSocket;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
/**
************************************************************************
* Copied from the not-yet-commons-ssl project at
* http://juliusdavies.ca/commons-ssl/
* This project is not yet in Apache, but it is Apache 2.0 licensed.
************************************************************************
* Interface for checking if a hostname matches the names stored inside the
* server's X.509 certificate. Correctly implements
* javax.net.ssl.HostnameVerifier, but that interface is not recommended.
* Instead we added several check() methods that take SSLSocket,
* or X509Certificate, or ultimately (they all end up calling this one),
* String. (It's easier to supply JUnit with Strings instead of mock
* SSLSession objects!)
 * <p>Our check() methods throw exceptions if the name is
* invalid, whereas javax.net.ssl.HostnameVerifier just returns true/false.
* <p/>
* We provide the HostnameVerifier.DEFAULT, HostnameVerifier.STRICT, and
* HostnameVerifier.ALLOW_ALL implementations. We also provide the more
* specialized HostnameVerifier.DEFAULT_AND_LOCALHOST, as well as
* HostnameVerifier.STRICT_IE6. But feel free to define your own
* implementations!
* <p/>
* Inspired by Sebastian Hauer's original StrictSSLProtocolSocketFactory in the
* HttpClient "contrib" repository.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface SSLHostnameVerifier extends javax.net.ssl.HostnameVerifier {
@Override
boolean verify(String host, SSLSession session);
void check(String host, SSLSocket ssl) throws IOException;
void check(String host, X509Certificate cert) throws SSLException;
void check(String host, String[] cns, String[] subjectAlts)
throws SSLException;
void check(String[] hosts, SSLSocket ssl) throws IOException;
void check(String[] hosts, X509Certificate cert) throws SSLException;
/**
* Checks to see if the supplied hostname matches any of the supplied CNs
* or "DNS" Subject-Alts. Most implementations only look at the first CN,
* and ignore any additional CNs. Most implementations do look at all of
* the "DNS" Subject-Alts. The CNs or Subject-Alts may contain wildcards
* according to RFC 2818.
*
     * @param hosts       The array of hostnames to verify.
     * @param cns         CN fields, in order, as extracted from the X.509
     *                    certificate.
     * @param subjectAlts Subject-Alt fields of type 2 ("DNS"), as extracted
     *                    from the X.509 certificate.
* @throws SSLException If verification failed.
*/
void check(String[] hosts, String[] cns, String[] subjectAlts)
throws SSLException;
/**
* The DEFAULT HostnameVerifier works the same way as Curl and Firefox.
* <p/>
* The hostname must match either the first CN, or any of the subject-alts.
* A wildcard can occur in the CN, and in any of the subject-alts.
* <p/>
* The only difference between DEFAULT and STRICT is that a wildcard (such
* as "*.foo.com") with DEFAULT matches all subdomains, including
* "a.b.foo.com".
*/
public final static SSLHostnameVerifier DEFAULT =
new AbstractVerifier() {
@Override
public final void check(final String[] hosts, final String[] cns,
final String[] subjectAlts)
throws SSLException {
check(hosts, cns, subjectAlts, false, false);
}
@Override
public final String toString() { return "DEFAULT"; }
};
/**
* The DEFAULT_AND_LOCALHOST HostnameVerifier works like the DEFAULT
* one with one additional relaxation: a host of "localhost",
* "localhost.localdomain", "127.0.0.1", "::1" will always pass, no matter
* what is in the server's certificate.
*/
public final static SSLHostnameVerifier DEFAULT_AND_LOCALHOST =
new AbstractVerifier() {
@Override
public final void check(final String[] hosts, final String[] cns,
final String[] subjectAlts)
throws SSLException {
if (isLocalhost(hosts[0])) {
return;
}
check(hosts, cns, subjectAlts, false, false);
}
@Override
public final String toString() { return "DEFAULT_AND_LOCALHOST"; }
};
/**
* The STRICT HostnameVerifier works the same way as java.net.URL in Sun
* Java 1.4, Sun Java 5, Sun Java 6. It's also pretty close to IE6.
* This implementation appears to be compliant with RFC 2818 for dealing
* with wildcards.
* <p/>
* The hostname must match either the first CN, or any of the subject-alts.
* A wildcard can occur in the CN, and in any of the subject-alts. The
* one divergence from IE6 is how we only check the first CN. IE6 allows
* a match against any of the CNs present. We decided to follow in
* Sun Java 1.4's footsteps and only check the first CN.
* <p/>
* A wildcard such as "*.foo.com" matches only subdomains in the same
* level, for example "a.foo.com". It does not match deeper subdomains
* such as "a.b.foo.com".
*/
public final static SSLHostnameVerifier STRICT =
new AbstractVerifier() {
@Override
public final void check(final String[] host, final String[] cns,
final String[] subjectAlts)
throws SSLException {
check(host, cns, subjectAlts, false, true);
}
@Override
public final String toString() { return "STRICT"; }
};
/**
* The STRICT_IE6 HostnameVerifier works just like the STRICT one with one
* minor variation: the hostname can match against any of the CN's in the
* server's certificate, not just the first one. This behaviour is
* identical to IE6's behaviour.
*/
public final static SSLHostnameVerifier STRICT_IE6 =
new AbstractVerifier() {
@Override
public final void check(final String[] host, final String[] cns,
final String[] subjectAlts)
throws SSLException {
check(host, cns, subjectAlts, true, true);
}
@Override
public final String toString() { return "STRICT_IE6"; }
};
/**
* The ALLOW_ALL HostnameVerifier essentially turns hostname verification
* off. This implementation is a no-op, and never throws the SSLException.
*/
public final static SSLHostnameVerifier ALLOW_ALL =
new AbstractVerifier() {
@Override
public final void check(final String[] host, final String[] cns,
final String[] subjectAlts) {
// Allow everything - so never blowup.
}
@Override
public final String toString() { return "ALLOW_ALL"; }
};
abstract class AbstractVerifier implements SSLHostnameVerifier {
/**
* This contains a list of 2nd-level domains that aren't allowed to
* have wildcards when combined with country-codes.
* For example: [*.co.uk].
* <p/>
* The [*.co.uk] problem is an interesting one. Should we just hope
     * that CAs would never foolishly allow such a certificate to happen?
* Looks like we're the only implementation guarding against this.
* Firefox, Curl, Sun Java 1.4, 5, 6 don't bother with this check.
*/
private final static String[] BAD_COUNTRY_2LDS =
{"ac", "co", "com", "ed", "edu", "go", "gouv", "gov", "info",
"lg", "ne", "net", "or", "org"};
private final static String[] LOCALHOSTS = {"::1", "127.0.0.1",
"localhost",
"localhost.localdomain"};
static {
// Just in case developer forgot to manually sort the array. :-)
Arrays.sort(BAD_COUNTRY_2LDS);
Arrays.sort(LOCALHOSTS);
}
protected AbstractVerifier() {}
/**
* The javax.net.ssl.HostnameVerifier contract.
*
* @param host 'hostname' we used to create our socket
* @param session SSLSession with the remote server
* @return true if the host matched the one in the certificate.
*/
@Override
public boolean verify(String host, SSLSession session) {
try {
Certificate[] certs = session.getPeerCertificates();
X509Certificate x509 = (X509Certificate) certs[0];
check(new String[]{host}, x509);
return true;
}
catch (SSLException e) {
return false;
}
}
@Override
public void check(String host, SSLSocket ssl) throws IOException {
check(new String[]{host}, ssl);
}
@Override
public void check(String host, X509Certificate cert)
throws SSLException {
check(new String[]{host}, cert);
}
@Override
public void check(String host, String[] cns, String[] subjectAlts)
throws SSLException {
check(new String[]{host}, cns, subjectAlts);
}
@Override
    public void check(String[] host, SSLSocket ssl)
throws IOException {
if (host == null) {
throw new NullPointerException("host to verify is null");
}
SSLSession session = ssl.getSession();
if (session == null) {
// In our experience this only happens under IBM 1.4.x when
        // spurious (unrelated) certificates show up in the server's
// chain. Hopefully this will unearth the real problem:
InputStream in = ssl.getInputStream();
in.available();
/*
If you're looking at the 2 lines of code above because
you're running into a problem, you probably have two
options:
#1. Clean up the certificate chain that your server
is presenting (e.g. edit "/etc/apache2/server.crt"
or wherever it is your server's certificate chain
is defined).
OR
#2. Upgrade to an IBM 1.5.x or greater JVM, or switch
to a non-IBM JVM.
*/
// If ssl.getInputStream().available() didn't cause an
// exception, maybe at least now the session is available?
session = ssl.getSession();
if (session == null) {
// If it's still null, probably a startHandshake() will
// unearth the real problem.
ssl.startHandshake();
// Okay, if we still haven't managed to cause an exception,
// might as well go for the NPE. Or maybe we're okay now?
session = ssl.getSession();
}
}
Certificate[] certs;
try {
certs = session.getPeerCertificates();
} catch (SSLPeerUnverifiedException spue) {
InputStream in = ssl.getInputStream();
in.available();
// Didn't trigger anything interesting? Okay, just throw
// original.
throw spue;
}
X509Certificate x509 = (X509Certificate) certs[0];
check(host, x509);
}
@Override
public void check(String[] host, X509Certificate cert)
throws SSLException {
String[] cns = Certificates.getCNs(cert);
String[] subjectAlts = Certificates.getDNSSubjectAlts(cert);
check(host, cns, subjectAlts);
}
public void check(final String[] hosts, final String[] cns,
final String[] subjectAlts, final boolean ie6,
final boolean strictWithSubDomains)
throws SSLException {
      // Build up the list of hostnames being verified, for logging/debugging purposes.
StringBuffer buf = new StringBuffer(32);
buf.append('<');
for (int i = 0; i < hosts.length; i++) {
String h = hosts[i];
h = h != null ? StringUtils.toLowerCase(h.trim()) : "";
hosts[i] = h;
if (i > 0) {
buf.append('/');
}
buf.append(h);
}
buf.append('>');
String hostnames = buf.toString();
// Build the list of names we're going to check. Our DEFAULT and
// STRICT implementations of the HostnameVerifier only use the
// first CN provided. All other CNs are ignored.
// (Firefox, wget, curl, Sun Java 1.4, 5, 6 all work this way).
final Set<String> names = new TreeSet<String>();
if (cns != null && cns.length > 0 && cns[0] != null) {
names.add(cns[0]);
if (ie6) {
for (int i = 1; i < cns.length; i++) {
names.add(cns[i]);
}
}
}
if (subjectAlts != null) {
for (int i = 0; i < subjectAlts.length; i++) {
if (subjectAlts[i] != null) {
names.add(subjectAlts[i]);
}
}
}
if (names.isEmpty()) {
String msg = "Certificate for " + hosts[0] + " doesn't contain CN or DNS subjectAlt";
throw new SSLException(msg);
}
// StringBuffer for building the error message.
buf = new StringBuffer();
boolean match = false;
out:
for (Iterator<String> it = names.iterator(); it.hasNext();) {
// Don't trim the CN, though!
final String cn = StringUtils.toLowerCase(it.next());
// Store CN in StringBuffer in case we need to report an error.
buf.append(" <");
buf.append(cn);
buf.append('>');
if (it.hasNext()) {
buf.append(" OR");
}
// The CN better have at least two dots if it wants wildcard
// action. It also can't be [*.co.uk] or [*.co.jp] or
// [*.org.uk], etc...
boolean doWildcard = cn.startsWith("*.") &&
cn.lastIndexOf('.') >= 0 &&
!isIP4Address(cn) &&
acceptableCountryWildcard(cn);
for (int i = 0; i < hosts.length; i++) {
final String hostName =
StringUtils.toLowerCase(hosts[i].trim());
if (doWildcard) {
match = hostName.endsWith(cn.substring(1));
if (match && strictWithSubDomains) {
// If we're in strict mode, then [*.foo.com] is not
// allowed to match [a.b.foo.com]
match = countDots(hostName) == countDots(cn);
}
} else {
match = hostName.equals(cn);
}
if (match) {
break out;
}
}
}
if (!match) {
throw new SSLException("hostname in certificate didn't match: " + hostnames + " !=" + buf);
}
}
public static boolean isIP4Address(final String cn) {
boolean isIP4 = true;
String tld = cn;
int x = cn.lastIndexOf('.');
// We only bother analyzing the characters after the final dot
// in the name.
if (x >= 0 && x + 1 < cn.length()) {
tld = cn.substring(x + 1);
}
      for (int i = 0; i < tld.length(); i++) {
        if (!Character.isDigit(tld.charAt(i))) {
          isIP4 = false;
          break;
        }
      }
return isIP4;
}
public static boolean acceptableCountryWildcard(final String cn) {
int cnLen = cn.length();
if (cnLen >= 7 && cnLen <= 9) {
// Look for the '.' in the 3rd-last position:
if (cn.charAt(cnLen - 3) == '.') {
// Trim off the [*.] and the [.XX].
String s = cn.substring(2, cnLen - 3);
// And test against the sorted array of bad 2lds:
int x = Arrays.binarySearch(BAD_COUNTRY_2LDS, s);
return x < 0;
}
}
return true;
}
public static boolean isLocalhost(String host) {
host = host != null ? StringUtils.toLowerCase(host.trim()) : "";
if (host.startsWith("::1")) {
int x = host.lastIndexOf('%');
if (x >= 0) {
host = host.substring(0, x);
}
}
int x = Arrays.binarySearch(LOCALHOSTS, host);
return x >= 0;
}
/**
* Counts the number of dots "." in a string.
*
* @param s string to count dots from
* @return number of dots
*/
public static int countDots(final String s) {
int count = 0;
for (int i = 0; i < s.length(); i++) {
if (s.charAt(i) == '.') {
count++;
}
}
return count;
}
}
static class Certificates {
public static String[] getCNs(X509Certificate cert) {
final List<String> cnList = new LinkedList<String>();
/*
Sebastian Hauer's original StrictSSLProtocolSocketFactory used
getName() and had the following comment:
Parses a X.500 distinguished name for the value of the
"Common Name" field. This is done a bit sloppy right
now and should probably be done a bit more according to
<code>RFC 2253</code>.
I've noticed that toString() seems to do a better job than
getName() on these X500Principal objects, so I'm hoping that
addresses Sebastian's concern.
For example, getName() gives me this:
1.2.840.113549.1.9.1=#16166a756c6975736461766965734063756362632e636f6d
whereas toString() gives me this:
[email protected]
Looks like toString() even works with non-ascii domain names!
I tested it with "花子.co.jp" and it worked fine.
*/
String subjectPrincipal = cert.getSubjectX500Principal().toString();
StringTokenizer st = new StringTokenizer(subjectPrincipal, ",");
while (st.hasMoreTokens()) {
String tok = st.nextToken();
int x = tok.indexOf("CN=");
if (x >= 0) {
cnList.add(tok.substring(x + 3));
}
}
if (!cnList.isEmpty()) {
String[] cns = new String[cnList.size()];
cnList.toArray(cns);
return cns;
} else {
return null;
}
}
/**
* Extracts the array of SubjectAlt DNS names from an X509Certificate.
* Returns null if there aren't any.
* <p/>
* Note: Java doesn't appear able to extract international characters
* from the SubjectAlts. It can only extract international characters
* from the CN field.
* <p/>
* (Or maybe the version of OpenSSL I'm using to test isn't storing the
* international characters correctly in the SubjectAlts?).
*
* @param cert X509Certificate
* @return Array of SubjectALT DNS names stored in the certificate.
*/
public static String[] getDNSSubjectAlts(X509Certificate cert) {
final List<String> subjectAltList = new LinkedList<String>();
Collection<List<?>> c = null;
try {
c = cert.getSubjectAlternativeNames();
}
catch (CertificateParsingException cpe) {
// Should probably log.debug() this?
cpe.printStackTrace();
}
if (c != null) {
Iterator<List<?>> it = c.iterator();
while (it.hasNext()) {
List<?> list = it.next();
int type = ((Integer) list.get(0)).intValue();
// If type is 2, then we've got a dNSName
if (type == 2) {
String s = (String) list.get(1);
subjectAltList.add(s);
}
}
}
if (!subjectAltList.isEmpty()) {
String[] subjectAlts = new String[subjectAltList.size()];
subjectAltList.toArray(subjectAlts);
return subjectAlts;
} else {
return null;
}
}
}
}
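// ---------------------------------------------------------------------
// Standalone usage sketch (not part of the original file; assumes its
// own compilation unit in this package). It exercises the DEFAULT and
// STRICT verifiers against made-up certificate names to show the one
// documented difference: wildcard depth. DEFAULT accepts the deep
// subdomain below, STRICT rejects it.
import javax.net.ssl.SSLException;

class HostnameVerifierSketch {
  public static void main(String[] args) {
    String[] cns = {"*.example.com"};        // hypothetical certificate CN
    String[] subjectAlts = {"example.com"};  // hypothetical subjectAlt
    String[] host = {"a.b.example.com"};
    try {
      SSLHostnameVerifier.DEFAULT.check(host, cns, subjectAlts); // passes
      SSLHostnameVerifier.STRICT.check(host, cns, subjectAlts);  // throws
    } catch (SSLException e) {
      System.out.println("STRICT rejected: " + e.getMessage());
    }
  }
}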
| 23,635 | 38.197347 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/KeyStoresFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.ssl;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import javax.net.ssl.KeyManager;
import javax.net.ssl.TrustManager;
import java.io.IOException;
import java.security.GeneralSecurityException;
/**
* Interface that gives access to {@link KeyManager} and {@link TrustManager}
* implementations.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface KeyStoresFactory extends Configurable {
/**
* Initializes the keystores of the factory.
*
* @param mode if the keystores are to be used in client or server mode.
* @throws IOException thrown if the keystores could not be initialized due
* to an IO error.
* @throws GeneralSecurityException thrown if the keystores could not be
   *         initialized due to a security error.
*/
public void init(SSLFactory.Mode mode) throws IOException, GeneralSecurityException;
/**
* Releases any resources being used.
*/
public void destroy();
/**
* Returns the keymanagers for owned certificates.
*
* @return the keymanagers for owned certificates.
*/
public KeyManager[] getKeyManagers();
/**
* Returns the trustmanagers for trusted certificates.
*
* @return the trustmanagers for trusted certificates.
*/
public TrustManager[] getTrustManagers();
}
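// ---------------------------------------------------------------------
// Standalone lifecycle sketch (not part of the original file; assumes
// its own compilation unit in this package and the default
// FileBasedKeyStoresFactory implementation). The three steps mirror the
// contract above: configure, init for a mode, destroy when done.
import javax.net.ssl.TrustManager;
import org.apache.hadoop.conf.Configuration;

class KeyStoresFactoryLifecycleSketch {
  public static void main(String[] args) throws Exception {
    KeyStoresFactory factory = new FileBasedKeyStoresFactory();
    factory.setConf(new Configuration());   // 1. configure
    factory.init(SSLFactory.Mode.CLIENT);   // 2. load key/trust stores
    TrustManager[] tms = factory.getTrustManagers(); // null if no truststore
    System.out.println(tms == null
        ? "no truststore configured" : "truststore loaded");
    factory.destroy();                      // 3. release resources
  }
}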
| 2,224 | 31.720588 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SslSocketConnectorSecure.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.ssl;
import org.mortbay.jetty.security.SslSocketConnector;
import javax.net.ssl.SSLServerSocket;
import java.io.IOException;
import java.net.ServerSocket;
import java.util.ArrayList;
/**
* This subclass of the Jetty SslSocketConnector exists solely to control
* the TLS protocol versions allowed. This is fallout from the POODLE
* vulnerability (CVE-2014-3566), which requires that SSLv3 be disabled.
* Only TLS 1.0 and later protocols are allowed.
*/
public class SslSocketConnectorSecure extends SslSocketConnector {
public SslSocketConnectorSecure() {
super();
}
/**
* Create a new ServerSocket that will not accept SSLv3 connections,
* but will accept TLSv1.x connections.
*/
  protected ServerSocket newServerSocket(String host, int port, int backlog)
throws IOException {
SSLServerSocket socket = (SSLServerSocket)
super.newServerSocket(host, port, backlog);
ArrayList<String> nonSSLProtocols = new ArrayList<String>();
for (String p : socket.getEnabledProtocols()) {
if (!p.contains("SSLv3")) {
nonSSLProtocols.add(p);
}
}
socket.setEnabledProtocols(nonSSLProtocols.toArray(
new String[nonSSLProtocols.size()]));
return socket;
}
}
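// ---------------------------------------------------------------------
// Standalone wiring sketch (not part of the original file; assumes its
// own compilation unit). The keystore path and passwords are
// placeholders; setPort/setKeystore/setPassword are assumed to be the
// usual setters inherited from the Jetty 6 SslSocketConnector base
// class.
import org.mortbay.jetty.Server;

class SecureConnectorSketch {
  public static void main(String[] args) throws Exception {
    SslSocketConnectorSecure connector = new SslSocketConnectorSecure();
    connector.setPort(8443);
    connector.setKeystore("/path/to/keystore.jks"); // placeholder
    connector.setPassword("storepass");             // placeholder
    Server server = new Server();
    server.addConnector(connector);
    server.start(); // server sockets created from here on refuse SSLv3
  }
}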
| 2,091 | 34.457627 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.ssl;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;
import javax.net.ssl.KeyManager;
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.TrustManager;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.security.GeneralSecurityException;
import java.security.KeyStore;
import java.text.MessageFormat;
/**
* {@link KeyStoresFactory} implementation that reads the certificates from
* keystore files.
* <p/>
 * If the trust certificates keystore file changes, the {@link TrustManager}
* is refreshed with the new trust certificate entries (using a
* {@link ReloadingX509TrustManager} trustmanager).
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class FileBasedKeyStoresFactory implements KeyStoresFactory {
private static final Log LOG =
LogFactory.getLog(FileBasedKeyStoresFactory.class);
public static final String SSL_KEYSTORE_LOCATION_TPL_KEY =
"ssl.{0}.keystore.location";
public static final String SSL_KEYSTORE_PASSWORD_TPL_KEY =
"ssl.{0}.keystore.password";
public static final String SSL_KEYSTORE_KEYPASSWORD_TPL_KEY =
"ssl.{0}.keystore.keypassword";
public static final String SSL_KEYSTORE_TYPE_TPL_KEY =
"ssl.{0}.keystore.type";
public static final String SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY =
"ssl.{0}.truststore.reload.interval";
public static final String SSL_TRUSTSTORE_LOCATION_TPL_KEY =
"ssl.{0}.truststore.location";
public static final String SSL_TRUSTSTORE_PASSWORD_TPL_KEY =
"ssl.{0}.truststore.password";
public static final String SSL_TRUSTSTORE_TYPE_TPL_KEY =
"ssl.{0}.truststore.type";
/**
* Default format of the keystore files.
*/
public static final String DEFAULT_KEYSTORE_TYPE = "jks";
/**
   * Default truststore reload check interval, in milliseconds.
*/
public static final int DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL = 10000;
private Configuration conf;
private KeyManager[] keyManagers;
private TrustManager[] trustManagers;
private ReloadingX509TrustManager trustManager;
/**
* Resolves a property name to its client/server version if applicable.
* <p/>
* NOTE: This method is public for testing purposes.
*
* @param mode client/server mode.
* @param template property name template.
* @return the resolved property name.
*/
@VisibleForTesting
public static String resolvePropertyName(SSLFactory.Mode mode,
String template) {
return MessageFormat.format(
template, StringUtils.toLowerCase(mode.toString()));
}
/**
* Sets the configuration for the factory.
*
* @param conf the configuration for the factory.
*/
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
/**
* Returns the configuration of the factory.
*
* @return the configuration of the factory.
*/
@Override
public Configuration getConf() {
return conf;
}
/**
* Initializes the keystores of the factory.
*
* @param mode if the keystores are to be used in client or server mode.
* @throws IOException thrown if the keystores could not be initialized due
* to an IO error.
* @throws GeneralSecurityException thrown if the keystores could not be
* initialized due to a security error.
*/
@Override
public void init(SSLFactory.Mode mode)
throws IOException, GeneralSecurityException {
boolean requireClientCert =
conf.getBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY,
SSLFactory.DEFAULT_SSL_REQUIRE_CLIENT_CERT);
// certificate store
String keystoreType =
conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY),
DEFAULT_KEYSTORE_TYPE);
KeyStore keystore = KeyStore.getInstance(keystoreType);
String keystoreKeyPassword = null;
if (requireClientCert || mode == SSLFactory.Mode.SERVER) {
String locationProperty =
resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY);
String keystoreLocation = conf.get(locationProperty, "");
if (keystoreLocation.isEmpty()) {
throw new GeneralSecurityException("The property '" + locationProperty +
"' has not been set in the ssl configuration file.");
}
String passwordProperty =
resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY);
String keystorePassword = getPassword(conf, passwordProperty, "");
if (keystorePassword.isEmpty()) {
throw new GeneralSecurityException("The property '" + passwordProperty +
"' has not been set in the ssl configuration file.");
}
String keyPasswordProperty =
resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY);
// Key password defaults to the same value as store password for
// compatibility with legacy configurations that did not use a separate
// configuration property for key password.
keystoreKeyPassword = getPassword(
conf, keyPasswordProperty, keystorePassword);
if (LOG.isDebugEnabled()) {
LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation);
}
InputStream is = new FileInputStream(keystoreLocation);
try {
keystore.load(is, keystorePassword.toCharArray());
} finally {
is.close();
}
if (LOG.isDebugEnabled()) {
LOG.debug(mode.toString() + " Loaded KeyStore: " + keystoreLocation);
}
} else {
keystore.load(null, null);
}
KeyManagerFactory keyMgrFactory = KeyManagerFactory
.getInstance(SSLFactory.SSLCERTIFICATE);
keyMgrFactory.init(keystore, (keystoreKeyPassword != null) ?
keystoreKeyPassword.toCharArray() : null);
keyManagers = keyMgrFactory.getKeyManagers();
//trust store
String truststoreType =
conf.get(resolvePropertyName(mode, SSL_TRUSTSTORE_TYPE_TPL_KEY),
DEFAULT_KEYSTORE_TYPE);
String locationProperty =
resolvePropertyName(mode, SSL_TRUSTSTORE_LOCATION_TPL_KEY);
String truststoreLocation = conf.get(locationProperty, "");
if (!truststoreLocation.isEmpty()) {
String passwordProperty = resolvePropertyName(mode,
SSL_TRUSTSTORE_PASSWORD_TPL_KEY);
String truststorePassword = getPassword(conf, passwordProperty, "");
if (truststorePassword.isEmpty()) {
throw new GeneralSecurityException("The property '" + passwordProperty +
"' has not been set in the ssl configuration file.");
}
long truststoreReloadInterval =
conf.getLong(
resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY),
DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL);
if (LOG.isDebugEnabled()) {
LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation);
}
trustManager = new ReloadingX509TrustManager(truststoreType,
truststoreLocation,
truststorePassword,
truststoreReloadInterval);
trustManager.init();
if (LOG.isDebugEnabled()) {
LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
}
trustManagers = new TrustManager[]{trustManager};
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("The property '" + locationProperty + "' has not been set, " +
"no TrustStore will be loaded");
}
trustManagers = null;
}
}
String getPassword(Configuration conf, String alias, String defaultPass) {
String password = defaultPass;
try {
char[] passchars = conf.getPassword(alias);
if (passchars != null) {
password = new String(passchars);
}
}
catch (IOException ioe) {
LOG.warn("Exception while trying to get password for alias " + alias +
": " + ioe.getMessage());
}
return password;
}
/**
* Releases any resources being used.
*/
@Override
public synchronized void destroy() {
if (trustManager != null) {
trustManager.destroy();
trustManager = null;
keyManagers = null;
trustManagers = null;
}
}
/**
* Returns the keymanagers for owned certificates.
*
* @return the keymanagers for owned certificates.
*/
@Override
public KeyManager[] getKeyManagers() {
return keyManagers;
}
/**
* Returns the trustmanagers for trusted certificates.
*
* @return the trustmanagers for trusted certificates.
*/
@Override
public TrustManager[] getTrustManagers() {
return trustManagers;
}
}
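// ---------------------------------------------------------------------
// Standalone configuration sketch (not part of the original file;
// assumes its own compilation unit). For CLIENT mode the property
// templates above resolve to "ssl.client.*" keys; setting them
// programmatically here stands in for the usual ssl-client.xml
// resource. The truststore path and password are placeholders, and the
// file must exist for init() to succeed.
import org.apache.hadoop.conf.Configuration;

class FileBasedKeyStoresFactorySketch {
  public static void main(String[] args) throws Exception {
    Configuration sslConf = new Configuration(false);
    sslConf.set("ssl.client.truststore.location", "/path/to/truststore.jks");
    sslConf.set("ssl.client.truststore.password", "changeit"); // placeholder
    FileBasedKeyStoresFactory factory = new FileBasedKeyStoresFactory();
    factory.setConf(sslConf);
    factory.init(SSLFactory.Mode.CLIENT);
    // Client mode without client certs populates only the trust managers:
    System.out.println(factory.getTrustManagers().length + " trust manager(s)");
    factory.destroy();
  }
}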
| 9,659 | 33.255319 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/ReloadingX509TrustManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.ssl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
import javax.net.ssl.X509TrustManager;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.security.KeyStore;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
import java.util.concurrent.atomic.AtomicReference;
/**
* A {@link TrustManager} implementation that reloads its configuration when
* the truststore file on disk changes.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public final class ReloadingX509TrustManager
implements X509TrustManager, Runnable {
private static final Log LOG =
LogFactory.getLog(ReloadingX509TrustManager.class);
private String type;
private File file;
private String password;
private long lastLoaded;
private long reloadInterval;
private AtomicReference<X509TrustManager> trustManagerRef;
private volatile boolean running;
private Thread reloader;
/**
* Creates a reloadable trustmanager. The trustmanager reloads itself
   * if the underlying truststore file has changed.
*
* @param type type of truststore file, typically 'jks'.
* @param location local path to the truststore file.
* @param password password of the truststore file.
* @param reloadInterval interval to check if the truststore file has
* changed, in milliseconds.
* @throws IOException thrown if the truststore could not be initialized due
* to an IO error.
* @throws GeneralSecurityException thrown if the truststore could not be
* initialized due to a security error.
*/
public ReloadingX509TrustManager(String type, String location,
String password, long reloadInterval)
throws IOException, GeneralSecurityException {
this.type = type;
file = new File(location);
this.password = password;
trustManagerRef = new AtomicReference<X509TrustManager>();
trustManagerRef.set(loadTrustManager());
this.reloadInterval = reloadInterval;
}
/**
* Starts the reloader thread.
*/
public void init() {
reloader = new Thread(this, "Truststore reloader thread");
reloader.setDaemon(true);
running = true;
reloader.start();
}
/**
* Stops the reloader thread.
*/
public void destroy() {
running = false;
reloader.interrupt();
}
/**
* Returns the reload check interval.
*
* @return the reload check interval, in milliseconds.
*/
public long getReloadInterval() {
return reloadInterval;
}
@Override
public void checkClientTrusted(X509Certificate[] chain, String authType)
throws CertificateException {
X509TrustManager tm = trustManagerRef.get();
if (tm != null) {
tm.checkClientTrusted(chain, authType);
} else {
throw new CertificateException("Unknown client chain certificate: " +
chain[0].toString());
}
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String authType)
throws CertificateException {
X509TrustManager tm = trustManagerRef.get();
if (tm != null) {
tm.checkServerTrusted(chain, authType);
} else {
throw new CertificateException("Unknown server chain certificate: " +
chain[0].toString());
}
}
private static final X509Certificate[] EMPTY = new X509Certificate[0];
@Override
public X509Certificate[] getAcceptedIssuers() {
X509Certificate[] issuers = EMPTY;
X509TrustManager tm = trustManagerRef.get();
if (tm != null) {
issuers = tm.getAcceptedIssuers();
}
return issuers;
}
boolean needsReload() {
boolean reload = true;
if (file.exists()) {
if (file.lastModified() == lastLoaded) {
reload = false;
}
} else {
lastLoaded = 0;
}
return reload;
}
X509TrustManager loadTrustManager()
throws IOException, GeneralSecurityException {
X509TrustManager trustManager = null;
KeyStore ks = KeyStore.getInstance(type);
lastLoaded = file.lastModified();
FileInputStream in = new FileInputStream(file);
try {
ks.load(in, password.toCharArray());
LOG.debug("Loaded truststore '" + file + "'");
} finally {
in.close();
}
TrustManagerFactory trustManagerFactory =
TrustManagerFactory.getInstance(SSLFactory.SSLCERTIFICATE);
trustManagerFactory.init(ks);
TrustManager[] trustManagers = trustManagerFactory.getTrustManagers();
for (TrustManager trustManager1 : trustManagers) {
if (trustManager1 instanceof X509TrustManager) {
trustManager = (X509TrustManager) trustManager1;
break;
}
}
return trustManager;
}
@Override
public void run() {
while (running) {
try {
Thread.sleep(reloadInterval);
} catch (InterruptedException e) {
//NOP
}
if (running && needsReload()) {
try {
trustManagerRef.set(loadTrustManager());
} catch (Exception ex) {
LOG.warn("Could not load truststore (keep using existing one) : " +
ex.toString(), ex);
}
}
}
}
}
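// ---------------------------------------------------------------------
// Standalone wiring sketch (not part of the original file; assumes its
// own compilation unit). It plugs the reloading trust manager into a
// plain JSSE SSLContext; the truststore path and password are
// placeholders, and 10000 ms matches the default reload interval used
// by FileBasedKeyStoresFactory.
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;

class ReloadingTrustManagerSketch {
  public static void main(String[] args) throws Exception {
    ReloadingX509TrustManager tm = new ReloadingX509TrustManager(
        "jks", "/path/to/truststore.jks", "changeit", 10000);
    tm.init(); // starts the daemon reloader thread
    SSLContext ctx = SSLContext.getInstance("TLS");
    ctx.init(null, new TrustManager[]{tm}, null);
    // ... use ctx.getSocketFactory() for outbound TLS connections ...
    tm.destroy(); // stops the reloader thread
  }
}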
| 6,302 | 29.746341 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.ssl;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLServerSocketFactory;
import javax.net.ssl.SSLSocketFactory;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.security.GeneralSecurityException;
/**
* Factory that creates SSLEngine and SSLSocketFactory instances using
* Hadoop configuration information.
* <p/>
* This SSLFactory uses a {@link ReloadingX509TrustManager} instance,
* which reloads public keys if the truststore file changes.
* <p/>
* This factory is used to configure HTTPS in Hadoop HTTP based endpoints, both
* client and server.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class SSLFactory implements ConnectionConfigurator {
@InterfaceAudience.Private
public static enum Mode { CLIENT, SERVER }
public static final String SSL_REQUIRE_CLIENT_CERT_KEY =
"hadoop.ssl.require.client.cert";
public static final String SSL_HOSTNAME_VERIFIER_KEY =
"hadoop.ssl.hostname.verifier";
public static final String SSL_CLIENT_CONF_KEY =
"hadoop.ssl.client.conf";
public static final String SSL_SERVER_CONF_KEY =
"hadoop.ssl.server.conf";
  public static final String SSLCERTIFICATE = IBM_JAVA ? "ibmX509" : "SunX509";
public static final boolean DEFAULT_SSL_REQUIRE_CLIENT_CERT = false;
public static final String KEYSTORES_FACTORY_CLASS_KEY =
"hadoop.ssl.keystores.factory.class";
public static final String SSL_ENABLED_PROTOCOLS =
"hadoop.ssl.enabled.protocols";
public static final String DEFAULT_SSL_ENABLED_PROTOCOLS = "TLSv1";
private Configuration conf;
private Mode mode;
private boolean requireClientCert;
private SSLContext context;
private HostnameVerifier hostnameVerifier;
private KeyStoresFactory keystoresFactory;
private String[] enabledProtocols = null;
/**
* Creates an SSLFactory.
*
* @param mode SSLFactory mode, client or server.
* @param conf Hadoop configuration from where the SSLFactory configuration
* will be read.
*/
public SSLFactory(Mode mode, Configuration conf) {
this.conf = conf;
if (mode == null) {
throw new IllegalArgumentException("mode cannot be NULL");
}
this.mode = mode;
requireClientCert = conf.getBoolean(SSL_REQUIRE_CLIENT_CERT_KEY,
DEFAULT_SSL_REQUIRE_CLIENT_CERT);
Configuration sslConf = readSSLConfiguration(mode);
Class<? extends KeyStoresFactory> klass
= conf.getClass(KEYSTORES_FACTORY_CLASS_KEY,
FileBasedKeyStoresFactory.class, KeyStoresFactory.class);
keystoresFactory = ReflectionUtils.newInstance(klass, sslConf);
enabledProtocols = conf.getStrings(SSL_ENABLED_PROTOCOLS,
DEFAULT_SSL_ENABLED_PROTOCOLS);
}
private Configuration readSSLConfiguration(Mode mode) {
Configuration sslConf = new Configuration(false);
sslConf.setBoolean(SSL_REQUIRE_CLIENT_CERT_KEY, requireClientCert);
String sslConfResource;
if (mode == Mode.CLIENT) {
sslConfResource = conf.get(SSL_CLIENT_CONF_KEY, "ssl-client.xml");
} else {
sslConfResource = conf.get(SSL_SERVER_CONF_KEY, "ssl-server.xml");
}
sslConf.addResource(sslConfResource);
return sslConf;
}
/**
* Initializes the factory.
*
* @throws GeneralSecurityException thrown if an SSL initialization error
* happened.
* @throws IOException thrown if an IO error happened while reading the SSL
* configuration.
*/
public void init() throws GeneralSecurityException, IOException {
keystoresFactory.init(mode);
context = SSLContext.getInstance("TLS");
context.init(keystoresFactory.getKeyManagers(),
keystoresFactory.getTrustManagers(), null);
context.getDefaultSSLParameters().setProtocols(enabledProtocols);
hostnameVerifier = getHostnameVerifier(conf);
}
private HostnameVerifier getHostnameVerifier(Configuration conf)
throws GeneralSecurityException, IOException {
return getHostnameVerifier(StringUtils.toUpperCase(
conf.get(SSL_HOSTNAME_VERIFIER_KEY, "DEFAULT").trim()));
}
public static HostnameVerifier getHostnameVerifier(String verifier)
throws GeneralSecurityException, IOException {
HostnameVerifier hostnameVerifier;
if (verifier.equals("DEFAULT")) {
hostnameVerifier = SSLHostnameVerifier.DEFAULT;
} else if (verifier.equals("DEFAULT_AND_LOCALHOST")) {
hostnameVerifier = SSLHostnameVerifier.DEFAULT_AND_LOCALHOST;
} else if (verifier.equals("STRICT")) {
hostnameVerifier = SSLHostnameVerifier.STRICT;
} else if (verifier.equals("STRICT_IE6")) {
hostnameVerifier = SSLHostnameVerifier.STRICT_IE6;
} else if (verifier.equals("ALLOW_ALL")) {
hostnameVerifier = SSLHostnameVerifier.ALLOW_ALL;
} else {
throw new GeneralSecurityException("Invalid hostname verifier: " +
verifier);
}
return hostnameVerifier;
}
/**
* Releases any resources being used.
*/
public void destroy() {
keystoresFactory.destroy();
}
/**
* Returns the SSLFactory KeyStoresFactory instance.
*
* @return the SSLFactory KeyStoresFactory instance.
*/
public KeyStoresFactory getKeystoresFactory() {
return keystoresFactory;
}
/**
* Returns a configured SSLEngine.
*
* @return the configured SSLEngine.
* @throws GeneralSecurityException thrown if the SSL engine could not
* be initialized.
   * @throws IOException thrown if an IO error occurred while loading
* the server keystore.
*/
public SSLEngine createSSLEngine()
throws GeneralSecurityException, IOException {
SSLEngine sslEngine = context.createSSLEngine();
if (mode == Mode.CLIENT) {
sslEngine.setUseClientMode(true);
} else {
sslEngine.setUseClientMode(false);
sslEngine.setNeedClientAuth(requireClientCert);
}
sslEngine.setEnabledProtocols(enabledProtocols);
return sslEngine;
}
/**
* Returns a configured SSLServerSocketFactory.
*
   * @return the configured SSLServerSocketFactory.
* @throws GeneralSecurityException thrown if the SSLSocketFactory could not
* be initialized.
   * @throws IOException thrown if an IO error occurred while loading
* the server keystore.
*/
public SSLServerSocketFactory createSSLServerSocketFactory()
throws GeneralSecurityException, IOException {
if (mode != Mode.SERVER) {
throw new IllegalStateException("Factory is in CLIENT mode");
}
return context.getServerSocketFactory();
}
/**
* Returns a configured SSLSocketFactory.
*
* @return the configured SSLSocketFactory.
* @throws GeneralSecurityException thrown if the SSLSocketFactory could not
* be initialized.
   * @throws IOException thrown if an IO error occurred while loading
* the server keystore.
*/
public SSLSocketFactory createSSLSocketFactory()
throws GeneralSecurityException, IOException {
if (mode != Mode.CLIENT) {
      throw new IllegalStateException("Factory is in SERVER mode");
}
return context.getSocketFactory();
}
/**
   * Returns the hostname verifier to be used in HttpsURLConnections.
*
* @return the hostname verifier.
*/
public HostnameVerifier getHostnameVerifier() {
if (mode != Mode.CLIENT) {
      throw new IllegalStateException("Factory is in SERVER mode");
}
return hostnameVerifier;
}
/**
* Returns if client certificates are required or not.
*
* @return if client certificates are required or not.
*/
public boolean isClientCertRequired() {
return requireClientCert;
}
/**
* If the given {@link HttpURLConnection} is an {@link HttpsURLConnection}
* configures the connection with the {@link SSLSocketFactory} and
* {@link HostnameVerifier} of this SSLFactory, otherwise does nothing.
*
* @param conn the {@link HttpURLConnection} instance to configure.
* @return the configured {@link HttpURLConnection} instance.
*
* @throws IOException if an IO error occurred.
*/
@Override
public HttpURLConnection configure(HttpURLConnection conn)
throws IOException {
if (conn instanceof HttpsURLConnection) {
HttpsURLConnection sslConn = (HttpsURLConnection) conn;
try {
sslConn.setSSLSocketFactory(createSSLSocketFactory());
} catch (GeneralSecurityException ex) {
throw new IOException(ex);
}
sslConn.setHostnameVerifier(getHostnameVerifier());
conn = sslConn;
}
return conn;
}
}
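// ---------------------------------------------------------------------
// Standalone client-side sketch (not part of the original file; assumes
// its own compilation unit, an ssl-client.xml on the classpath, and a
// placeholder URL). configure() installs both the socket factory and
// the hostname verifier on HTTPS connections, as documented above.
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.conf.Configuration;

class SSLFactoryClientSketch {
  public static void main(String[] args) throws Exception {
    SSLFactory factory = new SSLFactory(SSLFactory.Mode.CLIENT,
        new Configuration());
    factory.init();
    try {
      URL url = new URL("https://host:9871/status"); // placeholder
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn = factory.configure(conn);
      System.out.println(conn.getResponseCode());
    } finally {
      factory.destroy();
    }
  }
}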
| 9,866 | 33.865724 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Collection;
import java.util.Comparator;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.ReflectionUtils;
/**
* A Writable SortedMap.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SortedMapWritable extends AbstractMapWritable
implements SortedMap<WritableComparable, Writable> {
private SortedMap<WritableComparable, Writable> instance;
  /** Default constructor. */
public SortedMapWritable() {
super();
this.instance = new TreeMap<WritableComparable, Writable>();
}
/**
* Copy constructor.
*
* @param other the map to copy from
*/
public SortedMapWritable(SortedMapWritable other) {
this();
copy(other);
}
@Override
public Comparator<? super WritableComparable> comparator() {
// Returning null means we use the natural ordering of the keys
return null;
}
@Override
public WritableComparable firstKey() {
return instance.firstKey();
}
@Override
public SortedMap<WritableComparable, Writable>
headMap(WritableComparable toKey) {
return instance.headMap(toKey);
}
@Override
public WritableComparable lastKey() {
return instance.lastKey();
}
@Override
public SortedMap<WritableComparable, Writable>
subMap(WritableComparable fromKey, WritableComparable toKey) {
return instance.subMap(fromKey, toKey);
}
@Override
public SortedMap<WritableComparable, Writable>
tailMap(WritableComparable fromKey) {
return instance.tailMap(fromKey);
}
@Override
public void clear() {
instance.clear();
}
@Override
public boolean containsKey(Object key) {
return instance.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return instance.containsValue(value);
}
@Override
public Set<java.util.Map.Entry<WritableComparable, Writable>> entrySet() {
return instance.entrySet();
}
@Override
public Writable get(Object key) {
return instance.get(key);
}
@Override
public boolean isEmpty() {
return instance.isEmpty();
}
@Override
public Set<WritableComparable> keySet() {
return instance.keySet();
}
@Override
public Writable put(WritableComparable key, Writable value) {
addToMap(key.getClass());
addToMap(value.getClass());
return instance.put(key, value);
}
@Override
public void putAll(Map<? extends WritableComparable, ? extends Writable> t) {
for (Map.Entry<? extends WritableComparable, ? extends Writable> e:
t.entrySet()) {
put(e.getKey(), e.getValue());
}
}
@Override
public Writable remove(Object key) {
return instance.remove(key);
}
@Override
public int size() {
return instance.size();
}
@Override
public Collection<Writable> values() {
return instance.values();
}
@SuppressWarnings("unchecked")
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
// Read the number of entries in the map
int entries = in.readInt();
// Then read each key/value pair
for (int i = 0; i < entries; i++) {
WritableComparable key =
(WritableComparable) ReflectionUtils.newInstance(getClass(
in.readByte()), getConf());
key.readFields(in);
Writable value = (Writable) ReflectionUtils.newInstance(getClass(
in.readByte()), getConf());
value.readFields(in);
instance.put(key, value);
}
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
// Write out the number of entries in the map
out.writeInt(instance.size());
// Then write out each key/value pair
for (Map.Entry<WritableComparable, Writable> e: instance.entrySet()) {
out.writeByte(getId(e.getKey().getClass()));
e.getKey().write(out);
out.writeByte(getId(e.getValue().getClass()));
e.getValue().write(out);
}
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj instanceof SortedMapWritable) {
      Map<?, ?> map = (Map<?, ?>) obj;
if (size() != map.size()) {
return false;
}
return entrySet().equals(map.entrySet());
}
return false;
}
@Override
public int hashCode() {
return instance.hashCode();
}
}
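// ---------------------------------------------------------------------
// Standalone round-trip sketch (not part of the original file; assumes
// its own compilation unit in org.apache.hadoop.io, so Text,
// IntWritable, DataOutputBuffer and DataInputBuffer are in scope).
import java.io.IOException;

class SortedMapWritableSketch {
  public static void main(String[] args) throws IOException {
    SortedMapWritable map = new SortedMapWritable();
    map.put(new Text("b"), new IntWritable(2));
    map.put(new Text("a"), new IntWritable(1));

    DataOutputBuffer out = new DataOutputBuffer();
    map.write(out);                       // serialize

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    SortedMapWritable copy = new SortedMapWritable();
    copy.readFields(in);                  // deserialize
    System.out.println(copy.firstKey());  // "a": keys come back sorted
  }
}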
| 5,497 | 22.904348 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BinaryComparable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Interface supported by {@link org.apache.hadoop.io.WritableComparable}
* types supporting ordering/permutation by a representative set of bytes.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class BinaryComparable implements Comparable<BinaryComparable> {
/**
   * Return n such that bytes 0..n-1 of {@link #getBytes()} are valid.
*/
public abstract int getLength();
/**
* Return representative byte array for this instance.
*/
public abstract byte[] getBytes();
/**
   * Compare bytes from {@link #getBytes()}.
* @see org.apache.hadoop.io.WritableComparator#compareBytes(byte[],int,int,byte[],int,int)
*/
@Override
public int compareTo(BinaryComparable other) {
if (this == other)
return 0;
return WritableComparator.compareBytes(getBytes(), 0, getLength(),
other.getBytes(), 0, other.getLength());
}
/**
   * Compare bytes from {@link #getBytes()} to those provided.
*/
public int compareTo(byte[] other, int off, int len) {
return WritableComparator.compareBytes(getBytes(), 0, getLength(),
other, off, len);
}
/**
   * Return true if bytes from {@link #getBytes()} match.
*/
@Override
public boolean equals(Object other) {
if (!(other instanceof BinaryComparable))
return false;
BinaryComparable that = (BinaryComparable)other;
if (this.getLength() != that.getLength())
return false;
return this.compareTo(that) == 0;
}
/**
   * Return a hash of the bytes returned from {@link #getBytes()}.
* @see org.apache.hadoop.io.WritableComparator#hashBytes(byte[],int)
*/
@Override
public int hashCode() {
return WritableComparator.hashBytes(getBytes(), getLength());
}
}
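// ---------------------------------------------------------------------
// Standalone subclass sketch (not part of the original file; assumes
// its own compilation unit in org.apache.hadoop.io). Any type that
// exposes its identity as a byte range inherits ordering, equality and
// hashing from the abstract class.
class BytesHolderSketch extends BinaryComparable {
  private final byte[] data;
  BytesHolderSketch(byte[] data) { this.data = data; }
  @Override public int getLength() { return data.length; }
  @Override public byte[] getBytes() { return data; }

  public static void main(String[] args) {
    BytesHolderSketch a = new BytesHolderSketch(new byte[]{1, 2});
    BytesHolderSketch b = new BytesHolderSketch(new byte[]{1, 3});
    System.out.println(a.compareTo(b) < 0); // true: {1,2} sorts first
  }
}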
| 2,674 | 30.470588 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataInputByteBuffer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInputStream;
import java.io.InputStream;
import java.nio.ByteBuffer;
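/**
 * A {@link DataInputStream} that reads its bytes from one or more
 * {@link ByteBuffer}s supplied through {@link #reset(ByteBuffer...)},
 * consuming the buffers in order.
 */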
public class DataInputByteBuffer extends DataInputStream {
private static class Buffer extends InputStream {
private final byte[] scratch = new byte[1];
ByteBuffer[] buffers = new ByteBuffer[0];
int bidx, pos, length;
@Override
public int read() {
if (-1 == read(scratch, 0, 1)) {
return -1;
}
return scratch[0] & 0xFF;
}
@Override
public int read(byte[] b, int off, int len) {
if (bidx >= buffers.length) {
return -1;
}
int cur = 0;
do {
int rem = Math.min(len, buffers[bidx].remaining());
buffers[bidx].get(b, off, rem);
cur += rem;
off += rem;
len -= rem;
} while (len > 0 && ++bidx < buffers.length);
pos += cur;
return cur;
}
public void reset(ByteBuffer[] buffers) {
bidx = pos = length = 0;
this.buffers = buffers;
for (ByteBuffer b : buffers) {
length += b.remaining();
}
}
public int getPosition() {
return pos;
}
public int getLength() {
return length;
}
public ByteBuffer[] getData() {
return buffers;
}
}
private Buffer buffers;
public DataInputByteBuffer() {
this(new Buffer());
}
private DataInputByteBuffer(Buffer buffers) {
super(buffers);
this.buffers = buffers;
}
public void reset(ByteBuffer... input) {
buffers.reset(input);
}
public ByteBuffer[] getData() {
return buffers.getData();
}
public int getPosition() {
return buffers.getPosition();
}
public int getLength() {
return buffers.getLength();
}
}
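// ---------------------------------------------------------------------
// Standalone round-trip sketch (not part of the original file; assumes
// its own compilation unit). It reads an int whose four bytes span two
// ByteBuffers -- exactly the case the Buffer.read() loop above handles.
import java.io.IOException;
import java.nio.ByteBuffer;

class DataInputByteBufferSketch {
  public static void main(String[] args) throws IOException {
    ByteBuffer first = ByteBuffer.wrap(new byte[]{0x00, 0x00});
    ByteBuffer second = ByteBuffer.wrap(new byte[]{0x01, 0x02});
    DataInputByteBuffer in = new DataInputByteBuffer();
    in.reset(first, second);
    System.out.println(in.readInt()); // 258 == 0x00000102
  }
}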
| 2,551 | 24.777778 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataOutputBuffer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Preconditions;
/** A reusable {@link DataOutput} implementation that writes to an in-memory
* buffer.
*
* <p>This saves memory over creating a new DataOutputStream and
* ByteArrayOutputStream each time data is written.
*
* <p>Typical usage is something like the following:<pre>
*
* DataOutputBuffer buffer = new DataOutputBuffer();
* while (... loop condition ...) {
* buffer.reset();
* ... write buffer using DataOutput methods ...
* byte[] data = buffer.getData();
* int dataLength = buffer.getLength();
* ... write data to its ultimate destination ...
* }
* </pre>
*
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class DataOutputBuffer extends DataOutputStream {
private static class Buffer extends ByteArrayOutputStream {
public byte[] getData() { return buf; }
public int getLength() { return count; }
public Buffer() {
super();
}
public Buffer(int size) {
super(size);
}
public void write(DataInput in, int len) throws IOException {
int newcount = count + len;
if (newcount > buf.length) {
byte newbuf[] = new byte[Math.max(buf.length << 1, newcount)];
System.arraycopy(buf, 0, newbuf, 0, count);
buf = newbuf;
}
in.readFully(buf, count, len);
count = newcount;
}
/**
* Set the count for the current buf.
* @param newCount the new count to set
* @return the original count
*/
private int setCount(int newCount) {
Preconditions.checkArgument(newCount >= 0 && newCount <= buf.length);
int oldCount = count;
count = newCount;
return oldCount;
}
}
private Buffer buffer;
/** Constructs a new empty buffer. */
public DataOutputBuffer() {
this(new Buffer());
}
public DataOutputBuffer(int size) {
this(new Buffer(size));
}
private DataOutputBuffer(Buffer buffer) {
super(buffer);
this.buffer = buffer;
}
/** Returns the current contents of the buffer.
* Data is only valid to {@link #getLength()}.
*/
public byte[] getData() { return buffer.getData(); }
/** Returns the length of the valid data currently in the buffer. */
public int getLength() { return buffer.getLength(); }
/** Resets the buffer to empty. */
public DataOutputBuffer reset() {
this.written = 0;
buffer.reset();
return this;
}
/** Writes bytes from a DataInput directly into the buffer. */
public void write(DataInput in, int length) throws IOException {
buffer.write(in, length);
}
/** Write to a file stream */
public void writeTo(OutputStream out) throws IOException {
buffer.writeTo(out);
}
/**
* Overwrite an integer into the internal buffer. Note that this call can only
* be used to overwrite existing data in the buffer, i.e., buffer#count cannot
* be increased, and DataOutputStream#written cannot be increased.
*/
public void writeInt(int v, int offset) throws IOException {
Preconditions.checkState(offset + 4 <= buffer.getLength());
byte[] b = new byte[4];
b[0] = (byte) ((v >>> 24) & 0xFF);
b[1] = (byte) ((v >>> 16) & 0xFF);
b[2] = (byte) ((v >>> 8) & 0xFF);
b[3] = (byte) ((v >>> 0) & 0xFF);
int oldCount = buffer.setCount(offset);
buffer.write(b);
buffer.setCount(oldCount);
}
}
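// Usage sketch (hypothetical demo class, not part of the original source):
// reserves four bytes for a length header, writes the payload, then
// backpatches the header in place with the writeInt(int, int) overwrite
// facility documented above.
class DataOutputBufferBackpatchDemo {
  public static void main(String[] args) throws java.io.IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    out.writeInt(0);          // placeholder for the payload length
    out.writeBytes("hello");  // payload (5 bytes)
    int payloadLength = out.getLength() - 4;
    out.writeInt(payloadLength, 0); // overwrite the placeholder in place
    // Read it back to confirm the backpatched header.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    System.out.println(in.readInt()); // 5
  }
}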
| 4,378 | 29.2 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import com.google.common.annotations.VisibleForTesting;
/**
* Abstract base class for MapWritable and SortedMapWritable
*
 * Unlike org.apache.nutch.crawl.MapWritable, this class allows creation of
 * MapWritable<Writable, MapWritable> so the CLASS_TO_ID and ID_TO_CLASS
 * maps travel with each map instance instead of being static.
*
* Class ids range from 1 to 127 so there can be at most 127 distinct classes
* in any specific map instance.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class AbstractMapWritable implements Writable, Configurable {
private AtomicReference<Configuration> conf;
/* Class to id mappings */
@VisibleForTesting
Map<Class<?>, Byte> classToIdMap = new ConcurrentHashMap<Class<?>, Byte>();
/* Id to Class mappings */
@VisibleForTesting
Map<Byte, Class<?>> idToClassMap = new ConcurrentHashMap<Byte, Class<?>>();
/* The number of new classes (those not established by the constructor) */
private volatile byte newClasses = 0;
  /** @return the number of classes registered since construction (the "new" classes) */
byte getNewClasses() {
return newClasses;
}
/**
   * Used to add "predefined" classes, and by readFields to register "new"
   * classes found in the stream.
*/
private synchronized void addToMap(Class<?> clazz, byte id) {
if (classToIdMap.containsKey(clazz)) {
byte b = classToIdMap.get(clazz);
if (b != id) {
throw new IllegalArgumentException ("Class " + clazz.getName() +
" already registered but maps to " + b + " and not " + id);
}
}
if (idToClassMap.containsKey(id)) {
Class<?> c = idToClassMap.get(id);
if (!c.equals(clazz)) {
throw new IllegalArgumentException("Id " + id + " exists but maps to " +
c.getName() + " and not " + clazz.getName());
}
}
classToIdMap.put(clazz, id);
idToClassMap.put(id, clazz);
}
/** Add a Class to the maps if it is not already present. */
protected synchronized void addToMap(Class<?> clazz) {
if (classToIdMap.containsKey(clazz)) {
return;
}
if (newClasses + 1 > Byte.MAX_VALUE) {
throw new IndexOutOfBoundsException("adding an additional class would" +
" exceed the maximum number allowed");
}
byte id = ++newClasses;
addToMap(clazz, id);
}
/** @return the Class class for the specified id */
protected Class<?> getClass(byte id) {
return idToClassMap.get(id);
}
/** @return the id for the specified Class */
protected byte getId(Class<?> clazz) {
return classToIdMap.containsKey(clazz) ? classToIdMap.get(clazz) : -1;
}
/** Used by child copy constructors. */
protected synchronized void copy(Writable other) {
if (other != null) {
try {
DataOutputBuffer out = new DataOutputBuffer();
other.write(out);
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
readFields(in);
} catch (IOException e) {
throw new IllegalArgumentException("map cannot be copied: " +
e.getMessage());
}
} else {
throw new IllegalArgumentException("source map cannot be null");
}
}
/** constructor. */
protected AbstractMapWritable() {
this.conf = new AtomicReference<Configuration>();
addToMap(ArrayWritable.class,
Byte.valueOf(Integer.valueOf(-127).byteValue()));
addToMap(BooleanWritable.class,
Byte.valueOf(Integer.valueOf(-126).byteValue()));
addToMap(BytesWritable.class,
Byte.valueOf(Integer.valueOf(-125).byteValue()));
addToMap(FloatWritable.class,
Byte.valueOf(Integer.valueOf(-124).byteValue()));
addToMap(IntWritable.class,
Byte.valueOf(Integer.valueOf(-123).byteValue()));
addToMap(LongWritable.class,
Byte.valueOf(Integer.valueOf(-122).byteValue()));
addToMap(MapWritable.class,
Byte.valueOf(Integer.valueOf(-121).byteValue()));
addToMap(MD5Hash.class,
Byte.valueOf(Integer.valueOf(-120).byteValue()));
addToMap(NullWritable.class,
Byte.valueOf(Integer.valueOf(-119).byteValue()));
addToMap(ObjectWritable.class,
Byte.valueOf(Integer.valueOf(-118).byteValue()));
addToMap(SortedMapWritable.class,
Byte.valueOf(Integer.valueOf(-117).byteValue()));
addToMap(Text.class,
Byte.valueOf(Integer.valueOf(-116).byteValue()));
addToMap(TwoDArrayWritable.class,
Byte.valueOf(Integer.valueOf(-115).byteValue()));
// UTF8 is deprecated so we don't support it
addToMap(VIntWritable.class,
Byte.valueOf(Integer.valueOf(-114).byteValue()));
addToMap(VLongWritable.class,
Byte.valueOf(Integer.valueOf(-113).byteValue()));
}
/** @return the conf */
@Override
public Configuration getConf() {
return conf.get();
}
/** @param conf the conf to set */
@Override
public void setConf(Configuration conf) {
this.conf.set(conf);
}
@Override
public void write(DataOutput out) throws IOException {
// First write out the size of the class table and any classes that are
// "unknown" classes
out.writeByte(newClasses);
for (byte i = 1; i <= newClasses; i++) {
out.writeByte(i);
out.writeUTF(getClass(i).getName());
}
}
@Override
public void readFields(DataInput in) throws IOException {
// Get the number of "unknown" classes
newClasses = in.readByte();
// Then read in the class names and add them to our tables
for (int i = 0; i < newClasses; i++) {
byte id = in.readByte();
String className = in.readUTF();
try {
addToMap(Class.forName(className), id);
} catch (ClassNotFoundException e) {
throw new IOException("can't find class: " + className + " because "+
e.getMessage());
}
}
}
}
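// Usage sketch (hypothetical demo class, not part of the original source):
// round-trips a MapWritable, a concrete subclass of the class above. Text and
// IntWritable are both in the predefined class table, so the serialized
// header carries zero "new" classes; custom Writable types would be
// registered dynamically and written out by name.
class MapWritableRoundTripDemo {
  public static void main(String[] args) throws java.io.IOException {
    MapWritable map = new MapWritable();
    map.put(new Text("answer"), new IntWritable(42));
    DataOutputBuffer out = new DataOutputBuffer();
    map.write(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    MapWritable copy = new MapWritable();
    copy.readFields(in);
    System.out.println(copy.get(new Text("answer"))); // 42
  }
}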
| 7,118 | 31.655963 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ByteBufferPool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface ByteBufferPool {
/**
 * Get a new ByteBuffer. The pool can provide this by removing a buffer
 * from its internal cache, or by allocating a new buffer.
 *
 * @param direct Whether the buffer should be direct.
 * @param length The capacity the caller would like the buffer to have.
 * @return A new ByteBuffer whose directness matches <code>direct</code>.
 * Its capacity can be less than what was requested, but
 * must be at least 1 byte.
*/
ByteBuffer getBuffer(boolean direct, int length);
/**
* Release a buffer back to the pool.
* The pool may choose to put this buffer into its cache.
*
* @param buffer a direct bytebuffer
*/
void putBuffer(ByteBuffer buffer);
}
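// A minimal, non-caching sketch of an implementation of the interface above
// (the class name is hypothetical). A production pool, such as Hadoop's
// ElasticByteBufferPool, would cache buffers handed back via putBuffer and
// reuse them from getBuffer.
class NonCachingByteBufferPool implements ByteBufferPool {
  @Override
  public java.nio.ByteBuffer getBuffer(boolean direct, int length) {
    int capacity = Math.max(1, length); // never hand out an empty buffer
    return direct
        ? java.nio.ByteBuffer.allocateDirect(capacity)
        : java.nio.ByteBuffer.allocate(capacity);
  }
  @Override
  public void putBuffer(java.nio.ByteBuffer buffer) {
    // No cache: drop the buffer and let the garbage collector reclaim it.
  }
}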
| 1,816 | 36.081633 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ObjectWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.lang.reflect.Array;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.io.*;
import java.util.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.util.ProtoUtil;
import com.google.protobuf.Message;
/** A polymorphic Writable that writes an instance with its class name.
* Handles arrays, strings and primitive types without a Writable wrapper.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ObjectWritable implements Writable, Configurable {
private Class declaredClass;
private Object instance;
private Configuration conf;
public ObjectWritable() {}
public ObjectWritable(Object instance) {
set(instance);
}
public ObjectWritable(Class declaredClass, Object instance) {
this.declaredClass = declaredClass;
this.instance = instance;
}
/** Return the instance, or null if none. */
public Object get() { return instance; }
/** Return the class this is meant to be. */
public Class getDeclaredClass() { return declaredClass; }
/** Reset the instance. */
public void set(Object instance) {
this.declaredClass = instance.getClass();
this.instance = instance;
}
@Override
public String toString() {
return "OW[class=" + declaredClass + ",value=" + instance + "]";
}
@Override
public void readFields(DataInput in) throws IOException {
readObject(in, this, this.conf);
}
@Override
public void write(DataOutput out) throws IOException {
writeObject(out, instance, declaredClass, conf);
}
private static final Map<String, Class<?>> PRIMITIVE_NAMES = new HashMap<String, Class<?>>();
static {
PRIMITIVE_NAMES.put("boolean", Boolean.TYPE);
PRIMITIVE_NAMES.put("byte", Byte.TYPE);
PRIMITIVE_NAMES.put("char", Character.TYPE);
PRIMITIVE_NAMES.put("short", Short.TYPE);
PRIMITIVE_NAMES.put("int", Integer.TYPE);
PRIMITIVE_NAMES.put("long", Long.TYPE);
PRIMITIVE_NAMES.put("float", Float.TYPE);
PRIMITIVE_NAMES.put("double", Double.TYPE);
PRIMITIVE_NAMES.put("void", Void.TYPE);
}
private static class NullInstance extends Configured implements Writable {
private Class<?> declaredClass;
public NullInstance() { super(null); }
public NullInstance(Class declaredClass, Configuration conf) {
super(conf);
this.declaredClass = declaredClass;
}
@Override
public void readFields(DataInput in) throws IOException {
String className = UTF8.readString(in);
declaredClass = PRIMITIVE_NAMES.get(className);
if (declaredClass == null) {
try {
declaredClass = getConf().getClassByName(className);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e.toString());
}
}
}
@Override
public void write(DataOutput out) throws IOException {
UTF8.writeString(out, declaredClass.getName());
}
}
/** Write a {@link Writable}, {@link String}, primitive type, or an array of
* the preceding. */
public static void writeObject(DataOutput out, Object instance,
Class declaredClass,
Configuration conf) throws IOException {
writeObject(out, instance, declaredClass, conf, false);
}
/**
* Write a {@link Writable}, {@link String}, primitive type, or an array of
* the preceding.
*
* @param allowCompactArrays - set true for RPC and internal or intra-cluster
* usages. Set false for inter-cluster, File, and other persisted output
* usages, to preserve the ability to interchange files with other clusters
* that may not be running the same version of software. Sometime in ~2013
* we can consider removing this parameter and always using the compact format.
*/
public static void writeObject(DataOutput out, Object instance,
Class declaredClass, Configuration conf, boolean allowCompactArrays)
throws IOException {
if (instance == null) { // null
instance = new NullInstance(declaredClass, conf);
declaredClass = Writable.class;
}
// Special case: must come before writing out the declaredClass.
// If this is an eligible array of primitives,
// wrap it in an ArrayPrimitiveWritable$Internal wrapper class.
if (allowCompactArrays && declaredClass.isArray()
&& instance.getClass().getName().equals(declaredClass.getName())
&& instance.getClass().getComponentType().isPrimitive()) {
instance = new ArrayPrimitiveWritable.Internal(instance);
declaredClass = ArrayPrimitiveWritable.Internal.class;
}
UTF8.writeString(out, declaredClass.getName()); // always write declared
if (declaredClass.isArray()) { // non-primitive or non-compact array
int length = Array.getLength(instance);
out.writeInt(length);
for (int i = 0; i < length; i++) {
writeObject(out, Array.get(instance, i),
declaredClass.getComponentType(), conf, allowCompactArrays);
}
} else if (declaredClass == ArrayPrimitiveWritable.Internal.class) {
((ArrayPrimitiveWritable.Internal) instance).write(out);
} else if (declaredClass == String.class) { // String
UTF8.writeString(out, (String)instance);
} else if (declaredClass.isPrimitive()) { // primitive type
if (declaredClass == Boolean.TYPE) { // boolean
out.writeBoolean(((Boolean)instance).booleanValue());
} else if (declaredClass == Character.TYPE) { // char
out.writeChar(((Character)instance).charValue());
} else if (declaredClass == Byte.TYPE) { // byte
out.writeByte(((Byte)instance).byteValue());
} else if (declaredClass == Short.TYPE) { // short
out.writeShort(((Short)instance).shortValue());
} else if (declaredClass == Integer.TYPE) { // int
out.writeInt(((Integer)instance).intValue());
} else if (declaredClass == Long.TYPE) { // long
out.writeLong(((Long)instance).longValue());
} else if (declaredClass == Float.TYPE) { // float
out.writeFloat(((Float)instance).floatValue());
} else if (declaredClass == Double.TYPE) { // double
out.writeDouble(((Double)instance).doubleValue());
} else if (declaredClass == Void.TYPE) { // void
} else {
throw new IllegalArgumentException("Not a primitive: "+declaredClass);
}
} else if (declaredClass.isEnum()) { // enum
UTF8.writeString(out, ((Enum)instance).name());
} else if (Writable.class.isAssignableFrom(declaredClass)) { // Writable
UTF8.writeString(out, instance.getClass().getName());
((Writable)instance).write(out);
} else if (Message.class.isAssignableFrom(declaredClass)) {
((Message)instance).writeDelimitedTo(
DataOutputOutputStream.constructOutputStream(out));
} else {
throw new IOException("Can't write: "+instance+" as "+declaredClass);
}
}
/** Read a {@link Writable}, {@link String}, primitive type, or an array of
* the preceding. */
public static Object readObject(DataInput in, Configuration conf)
throws IOException {
return readObject(in, null, conf);
}
/** Read a {@link Writable}, {@link String}, primitive type, or an array of
* the preceding. */
@SuppressWarnings("unchecked")
public static Object readObject(DataInput in, ObjectWritable objectWritable, Configuration conf)
throws IOException {
String className = UTF8.readString(in);
Class<?> declaredClass = PRIMITIVE_NAMES.get(className);
if (declaredClass == null) {
declaredClass = loadClass(conf, className);
}
Object instance;
if (declaredClass.isPrimitive()) { // primitive types
if (declaredClass == Boolean.TYPE) { // boolean
instance = Boolean.valueOf(in.readBoolean());
} else if (declaredClass == Character.TYPE) { // char
instance = Character.valueOf(in.readChar());
} else if (declaredClass == Byte.TYPE) { // byte
instance = Byte.valueOf(in.readByte());
} else if (declaredClass == Short.TYPE) { // short
instance = Short.valueOf(in.readShort());
} else if (declaredClass == Integer.TYPE) { // int
instance = Integer.valueOf(in.readInt());
} else if (declaredClass == Long.TYPE) { // long
instance = Long.valueOf(in.readLong());
} else if (declaredClass == Float.TYPE) { // float
instance = Float.valueOf(in.readFloat());
} else if (declaredClass == Double.TYPE) { // double
instance = Double.valueOf(in.readDouble());
} else if (declaredClass == Void.TYPE) { // void
instance = null;
} else {
throw new IllegalArgumentException("Not a primitive: "+declaredClass);
}
} else if (declaredClass.isArray()) { // array
int length = in.readInt();
instance = Array.newInstance(declaredClass.getComponentType(), length);
for (int i = 0; i < length; i++) {
Array.set(instance, i, readObject(in, conf));
}
} else if (declaredClass == ArrayPrimitiveWritable.Internal.class) {
// Read and unwrap ArrayPrimitiveWritable$Internal array.
// Always allow the read, even if write is disabled by allowCompactArrays.
ArrayPrimitiveWritable.Internal temp =
new ArrayPrimitiveWritable.Internal();
temp.readFields(in);
instance = temp.get();
declaredClass = instance.getClass();
} else if (declaredClass == String.class) { // String
instance = UTF8.readString(in);
} else if (declaredClass.isEnum()) { // enum
instance = Enum.valueOf((Class<? extends Enum>) declaredClass, UTF8.readString(in));
} else if (Message.class.isAssignableFrom(declaredClass)) {
instance = tryInstantiateProtobuf(declaredClass, in);
} else { // Writable
Class instanceClass = null;
String str = UTF8.readString(in);
instanceClass = loadClass(conf, str);
Writable writable = WritableFactories.newInstance(instanceClass, conf);
writable.readFields(in);
instance = writable;
if (instanceClass == NullInstance.class) { // null
declaredClass = ((NullInstance)instance).declaredClass;
instance = null;
}
}
if (objectWritable != null) { // store values
objectWritable.declaredClass = declaredClass;
objectWritable.instance = instance;
}
return instance;
}
/**
* Try to instantiate a protocol buffer of the given message class
* from the given input stream.
*
* @param protoClass the class of the generated protocol buffer
* @param dataIn the input stream to read from
* @return the instantiated Message instance
* @throws IOException if an IO problem occurs
*/
private static Message tryInstantiateProtobuf(
Class<?> protoClass,
DataInput dataIn) throws IOException {
try {
if (dataIn instanceof InputStream) {
// We can use the built-in parseDelimitedFrom and not have to re-copy
// the data
Method parseMethod = getStaticProtobufMethod(protoClass,
"parseDelimitedFrom", InputStream.class);
return (Message)parseMethod.invoke(null, (InputStream)dataIn);
} else {
// Have to read it into a buffer first, since protobuf doesn't deal
// with the DataInput interface directly.
// Read the size delimiter that writeDelimitedTo writes
int size = ProtoUtil.readRawVarint32(dataIn);
if (size < 0) {
throw new IOException("Invalid size: " + size);
}
byte[] data = new byte[size];
dataIn.readFully(data);
Method parseMethod = getStaticProtobufMethod(protoClass,
"parseFrom", byte[].class);
return (Message)parseMethod.invoke(null, data);
}
} catch (InvocationTargetException e) {
if (e.getCause() instanceof IOException) {
throw (IOException)e.getCause();
} else {
throw new IOException(e.getCause());
}
} catch (IllegalAccessException iae) {
throw new AssertionError("Could not access parse method in " +
protoClass);
}
}
static Method getStaticProtobufMethod(Class<?> declaredClass, String method,
Class<?> ... args) {
try {
return declaredClass.getMethod(method, args);
} catch (Exception e) {
// This is a bug in Hadoop - protobufs should all have this static method
throw new AssertionError("Protocol buffer class " + declaredClass +
" does not have an accessible parseFrom(InputStream) method!");
}
}
/**
 * Find and load the class with the given name <tt>className</tt>, first
 * looking it up via the specified <tt>conf</tt>. If <tt>conf</tt> is null,
 * try to load it directly.
*/
public static Class<?> loadClass(Configuration conf, String className) {
Class<?> declaredClass = null;
try {
if (conf != null)
declaredClass = conf.getClassByName(className);
else
declaredClass = Class.forName(className);
} catch (ClassNotFoundException e) {
throw new RuntimeException("readObject can't find class " + className,
e);
}
return declaredClass;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return this.conf;
}
}
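// Usage sketch (hypothetical demo class, not part of the original source):
// writes two values of different declared types into one stream, then reads
// them back with no prior type knowledge; the class name travels with each
// value.
class ObjectWritableRoundTripDemo {
  public static void main(String[] args) throws java.io.IOException {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    DataOutputBuffer out = new DataOutputBuffer();
    ObjectWritable.writeObject(out, new Text("hi"), Text.class, conf);
    ObjectWritable.writeObject(out, Integer.valueOf(7), Integer.TYPE, conf);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    System.out.println(ObjectWritable.readObject(in, conf)); // hi
    System.out.println(ObjectWritable.readObject(in, conf)); // 7
  }
}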
| 14,674 | 36.246193 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ShortWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A WritableComparable for shorts. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ShortWritable implements WritableComparable<ShortWritable> {
private short value;
public ShortWritable() {
}
public ShortWritable(short value) {
set(value);
}
/** Set the value of this ShortWritable. */
public void set(short value) {
this.value = value;
}
/** Return the value of this ShortWritable. */
public short get() {
return value;
}
/** read the short value */
@Override
public void readFields(DataInput in) throws IOException {
value = in.readShort();
}
/** write short value */
@Override
public void write(DataOutput out) throws IOException {
out.writeShort(value);
}
/** Returns true iff <code>o</code> is a ShortWritable with the same value. */
@Override
public boolean equals(Object o) {
if (!(o instanceof ShortWritable))
return false;
ShortWritable other = (ShortWritable) o;
return this.value == other.value;
}
/** hash code */
@Override
public int hashCode() {
return value;
}
  /** Compares two ShortWritables. */
@Override
public int compareTo(ShortWritable o) {
short thisValue = this.value;
short thatValue = (o).value;
return (thisValue < thatValue ? -1 : (thisValue == thatValue ? 0 : 1));
}
/** Short values in string format */
@Override
public String toString() {
return Short.toString(value);
}
/** A Comparator optimized for ShortWritable. */
public static class Comparator extends WritableComparator {
public Comparator() {
super(ShortWritable.class);
}
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
short thisValue = (short) readUnsignedShort(b1, s1);
short thatValue = (short) readUnsignedShort(b2, s2);
return (thisValue < thatValue ? -1 : (thisValue == thatValue ? 0 : 1));
}
}
static { // register this comparator
WritableComparator.define(ShortWritable.class, new Comparator());
}
}
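// Usage sketch (hypothetical demo class, not part of the original source):
// compares two serialized ShortWritables directly on their bytes. Note the
// comparator above reads an unsigned short and casts it back to short, so
// negative values still order correctly.
class ShortWritableRawCompareDemo {
  public static void main(String[] args) throws java.io.IOException {
    DataOutputBuffer a = new DataOutputBuffer();
    DataOutputBuffer b = new DataOutputBuffer();
    new ShortWritable((short) -2).write(a);
    new ShortWritable((short) 3).write(b);
    WritableComparator cmp = WritableComparator.get(ShortWritable.class);
    int c = cmp.compare(a.getData(), 0, a.getLength(),
                        b.getData(), 0, b.getLength());
    System.out.println(c < 0); // true: -2 sorts before 3, no deserialization
  }
}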
| 3,104 | 26.723214 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/LongWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A WritableComparable for longs. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LongWritable implements WritableComparable<LongWritable> {
private long value;
public LongWritable() {}
public LongWritable(long value) { set(value); }
/** Set the value of this LongWritable. */
public void set(long value) { this.value = value; }
/** Return the value of this LongWritable. */
public long get() { return value; }
@Override
public void readFields(DataInput in) throws IOException {
value = in.readLong();
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(value);
}
/** Returns true iff <code>o</code> is a LongWritable with the same value. */
@Override
public boolean equals(Object o) {
if (!(o instanceof LongWritable))
return false;
LongWritable other = (LongWritable)o;
return this.value == other.value;
}
@Override
public int hashCode() {
return (int)value;
}
/** Compares two LongWritables. */
@Override
public int compareTo(LongWritable o) {
long thisValue = this.value;
long thatValue = o.value;
return (thisValue<thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
}
@Override
public String toString() {
return Long.toString(value);
}
/** A Comparator optimized for LongWritable. */
public static class Comparator extends WritableComparator {
public Comparator() {
super(LongWritable.class);
}
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
long thisValue = readLong(b1, s1);
long thatValue = readLong(b2, s2);
return (thisValue<thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
}
}
/** A decreasing Comparator optimized for LongWritable. */
public static class DecreasingComparator extends Comparator {
@Override
public int compare(WritableComparable a, WritableComparable b) {
return super.compare(b, a);
}
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
return super.compare(b2, s2, l2, b1, s1, l1);
}
}
static { // register default comparator
WritableComparator.define(LongWritable.class, new Comparator());
}
}
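// Usage sketch (hypothetical demo class, not part of the original source):
// the DecreasingComparator above inverts both the object and raw-byte
// comparisons, giving a descending sort order, e.g. for emitting the largest
// keys first.
class LongWritableDecreasingDemo {
  public static void main(String[] args) {
    LongWritable one = new LongWritable(1);
    LongWritable two = new LongWritable(2);
    WritableComparator desc = new LongWritable.DecreasingComparator();
    System.out.println(desc.compare(one, two) > 0); // true: 1 sorts after 2
  }
}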
| 3,374 | 28.094828 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.serializer.Deserializer;
import org.apache.hadoop.io.serializer.Serialization;
import org.apache.hadoop.io.serializer.SerializationFactory;
import org.apache.hadoop.io.serializer.Serializer;
import org.apache.hadoop.util.GenericsUtil;
/**
* DefaultStringifier is the default implementation of the {@link Stringifier}
* interface which stringifies the objects using base64 encoding of the
* serialized version of the objects. The {@link Serializer} and
* {@link Deserializer} are obtained from the {@link SerializationFactory}.
* <br>
* DefaultStringifier offers convenience methods to store/load objects to/from
* the configuration.
*
* @param <T> the class of the objects to stringify
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class DefaultStringifier<T> implements Stringifier<T> {
private static final String SEPARATOR = ",";
private Serializer<T> serializer;
private Deserializer<T> deserializer;
private DataInputBuffer inBuf;
private DataOutputBuffer outBuf;
public DefaultStringifier(Configuration conf, Class<T> c) {
SerializationFactory factory = new SerializationFactory(conf);
this.serializer = factory.getSerializer(c);
this.deserializer = factory.getDeserializer(c);
this.inBuf = new DataInputBuffer();
this.outBuf = new DataOutputBuffer();
try {
serializer.open(outBuf);
deserializer.open(inBuf);
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
@Override
public T fromString(String str) throws IOException {
    byte[] bytes = Base64.decodeBase64(str.getBytes(Charsets.UTF_8));
    inBuf.reset(bytes, bytes.length);
    T restored = deserializer.deserialize(null);
    return restored;
}
@Override
public String toString(T obj) throws IOException {
outBuf.reset();
serializer.serialize(obj);
byte[] buf = new byte[outBuf.getLength()];
System.arraycopy(outBuf.getData(), 0, buf, 0, buf.length);
return new String(Base64.encodeBase64(buf), Charsets.UTF_8);
}
@Override
public void close() throws IOException {
inBuf.close();
outBuf.close();
deserializer.close();
serializer.close();
}
/**
* Stores the item in the configuration with the given keyName.
*
* @param <K> the class of the item
   * @param conf the configuration to store the item in
* @param item the object to be stored
* @param keyName the name of the key to use
* @throws IOException : forwards Exceptions from the underlying
* {@link Serialization} classes.
*/
public static <K> void store(Configuration conf, K item, String keyName)
throws IOException {
DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf,
GenericsUtil.getClass(item));
conf.set(keyName, stringifier.toString(item));
stringifier.close();
}
/**
* Restores the object from the configuration.
*
* @param <K> the class of the item
* @param conf the configuration to use
* @param keyName the name of the key to use
* @param itemClass the class of the item
* @return restored object
* @throws IOException : forwards Exceptions from the underlying
* {@link Serialization} classes.
*/
public static <K> K load(Configuration conf, String keyName,
Class<K> itemClass) throws IOException {
DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf,
itemClass);
try {
String itemStr = conf.get(keyName);
return stringifier.fromString(itemStr);
} finally {
stringifier.close();
}
}
/**
* Stores the array of items in the configuration with the given keyName.
*
* @param <K> the class of the item
* @param conf the configuration to use
* @param items the objects to be stored
* @param keyName the name of the key to use
* @throws IndexOutOfBoundsException if the items array is empty
* @throws IOException : forwards Exceptions from the underlying
* {@link Serialization} classes.
*/
public static <K> void storeArray(Configuration conf, K[] items,
String keyName) throws IOException {
DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf,
GenericsUtil.getClass(items[0]));
try {
StringBuilder builder = new StringBuilder();
for (K item : items) {
builder.append(stringifier.toString(item)).append(SEPARATOR);
}
conf.set(keyName, builder.toString());
}
finally {
stringifier.close();
}
}
/**
* Restores the array of objects from the configuration.
*
* @param <K> the class of the item
* @param conf the configuration to use
* @param keyName the name of the key to use
* @param itemClass the class of the item
* @return restored object
* @throws IOException : forwards Exceptions from the underlying
* {@link Serialization} classes.
*/
public static <K> K[] loadArray(Configuration conf, String keyName,
Class<K> itemClass) throws IOException {
DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf,
itemClass);
try {
String itemStr = conf.get(keyName);
ArrayList<K> list = new ArrayList<K>();
String[] parts = itemStr.split(SEPARATOR);
for (String part : parts) {
if (!part.isEmpty())
list.add(stringifier.fromString(part));
}
return GenericsUtil.toArray(itemClass, list);
}
finally {
stringifier.close();
}
}
}
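// Usage sketch (hypothetical demo class; the key name "demo.value" is
// arbitrary, not part of the original source): round-trips a Writable through
// a Configuration entry using the static store/load helpers above.
class DefaultStringifierDemo {
  public static void main(String[] args) throws java.io.IOException {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    DefaultStringifier.store(conf, new IntWritable(42), "demo.value");
    IntWritable restored =
        DefaultStringifier.load(conf, "demo.value", IntWritable.class);
    System.out.println(restored.get()); // 42
  }
}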
| 6,798 | 31.6875 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Writable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataOutput;
import java.io.DataInput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A serializable object which implements a simple, efficient, serialization
* protocol, based on {@link DataInput} and {@link DataOutput}.
*
* <p>Any <code>key</code> or <code>value</code> type in the Hadoop Map-Reduce
* framework implements this interface.</p>
*
* <p>Implementations typically implement a static <code>read(DataInput)</code>
* method which constructs a new instance, calls {@link #readFields(DataInput)}
* and returns the instance.</p>
*
* <p>Example:</p>
* <p><blockquote><pre>
* public class MyWritable implements Writable {
* // Some data
* private int counter;
* private long timestamp;
*
* public void write(DataOutput out) throws IOException {
* out.writeInt(counter);
* out.writeLong(timestamp);
* }
*
* public void readFields(DataInput in) throws IOException {
* counter = in.readInt();
* timestamp = in.readLong();
* }
*
* public static MyWritable read(DataInput in) throws IOException {
* MyWritable w = new MyWritable();
* w.readFields(in);
* return w;
* }
* }
* </pre></blockquote></p>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Writable {
/**
* Serialize the fields of this object to <code>out</code>.
*
   * @param out <code>DataOutput</code> to serialize this object into.
* @throws IOException
*/
void write(DataOutput out) throws IOException;
/**
* Deserialize the fields of this object from <code>in</code>.
*
* <p>For efficiency, implementations should attempt to re-use storage in the
* existing object where possible.</p>
*
   * @param in <code>DataInput</code> to deserialize this object from.
* @throws IOException
*/
void readFields(DataInput in) throws IOException;
}
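// Usage sketch (hypothetical demo class, not part of the original source):
// a full write/readFields round trip using the in-memory buffers from this
// package. Note that readFields re-uses the Text object's internal storage,
// per the re-use contract documented above.
class WritableRoundTripDemo {
  public static void main(String[] args) throws java.io.IOException {
    Text original = new Text("payload");
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);
    Text restored = new Text();
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    restored.readFields(in);
    System.out.println(restored); // payload
  }
}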
| 2,919 | 32.953488 | 80 |
java
|