repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.util.Arrays;
import java.util.UUID;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.metrics.RetryCacheMetrics;
import org.apache.hadoop.util.LightWeightCache;
import org.apache.hadoop.util.LightWeightGSet;
import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* Maintains a cache of non-idempotent requests that have been successfully
* processed by the RPC server implementation, to handle the retries. A request
* is uniquely identified by the unique client ID + call ID of the RPC request.
* On receiving a retried request, an entry will be found in the
* {@link RetryCache} and the previous response is sent back to the client.
* <p>
* For an implementation that uses this cache, see the HDFS FSNamesystem class.
*/
@InterfaceAudience.Private
public class RetryCache {
public static final Log LOG = LogFactory.getLog(RetryCache.class);
private final RetryCacheMetrics retryCacheMetrics;
/**
* A CacheEntry is tracked using the unique client ID and call ID of the RPC request
*/
public static class CacheEntry implements LightWeightCache.Entry {
/**
* Processing state of the requests
*/
private static final byte INPROGRESS = 0;
private static final byte SUCCESS = 1;
private static final byte FAILED = 2;
private byte state = INPROGRESS;
// Store uuid as two long for better memory utilization
private final long clientIdMsb; // Most significant bytes
private final long clientIdLsb; // Least significant bytes
private final int callId;
private final long expirationTime;
private LightWeightGSet.LinkedElement next;
CacheEntry(byte[] clientId, int callId, long expirationTime) {
// ClientId must be a UUID - that is 16 octets.
Preconditions.checkArgument(clientId.length == ClientId.BYTE_LENGTH,
"Invalid clientId - length is " + clientId.length
+ " expected length " + ClientId.BYTE_LENGTH);
// Convert UUID bytes to two longs
clientIdMsb = ClientId.getMsb(clientId);
clientIdLsb = ClientId.getLsb(clientId);
this.callId = callId;
this.expirationTime = expirationTime;
}
CacheEntry(byte[] clientId, int callId, long expirationTime,
boolean success) {
this(clientId, callId, expirationTime);
this.state = success ? SUCCESS : FAILED;
}
private static int hashCode(long value) {
return (int)(value ^ (value >>> 32));
}
@Override
public int hashCode() {
return (hashCode(clientIdMsb) * 31 + hashCode(clientIdLsb)) * 31 + callId;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof CacheEntry)) {
return false;
}
CacheEntry other = (CacheEntry) obj;
return callId == other.callId && clientIdMsb == other.clientIdMsb
&& clientIdLsb == other.clientIdLsb;
}
@Override
public void setNext(LinkedElement next) {
this.next = next;
}
@Override
public LinkedElement getNext() {
return next;
}
synchronized void completed(boolean success) {
state = success ? SUCCESS : FAILED;
this.notifyAll();
}
public synchronized boolean isSuccess() {
return state == SUCCESS;
}
@Override
public void setExpirationTime(long timeNano) {
// expiration time does not change
}
@Override
public long getExpirationTime() {
return expirationTime;
}
@Override
public String toString() {
return (new UUID(this.clientIdMsb, this.clientIdLsb)).toString() + ":"
+ this.callId + ":" + this.state;
}
}
/**
* CacheEntry with a payload that tracks the previous response, or parts of the
* previous response, to be used for generating the response to retried requests.
*/
public static class CacheEntryWithPayload extends CacheEntry {
private Object payload;
CacheEntryWithPayload(byte[] clientId, int callId, Object payload,
long expirationTime) {
super(clientId, callId, expirationTime);
this.payload = payload;
}
CacheEntryWithPayload(byte[] clientId, int callId, Object payload,
long expirationTime, boolean success) {
super(clientId, callId, expirationTime, success);
this.payload = payload;
}
/** Override equals to avoid findbugs warnings */
@Override
public boolean equals(Object obj) {
return super.equals(obj);
}
/** Override hashcode to avoid findbugs warnings */
@Override
public int hashCode() {
return super.hashCode();
}
public Object getPayload() {
return payload;
}
}
private final LightWeightGSet<CacheEntry, CacheEntry> set;
private final long expirationTime;
private String cacheName;
private final ReentrantLock lock = new ReentrantLock();
/**
* Constructor
* @param cacheName name to identify the cache by
* @param percentage percentage of total java heap space used by this cache
* @param expirationTime time for an entry to expire in nanoseconds
*/
public RetryCache(String cacheName, double percentage, long expirationTime) {
int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
capacity = capacity > 16 ? capacity : 16;
this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
expirationTime, 0);
this.expirationTime = expirationTime;
this.cacheName = cacheName;
this.retryCacheMetrics = RetryCacheMetrics.create(this);
}
private static boolean skipRetryCache() {
// Do not track non-RPC invocations, or RPC requests with an
// invalid callId or clientId, in the retry cache
return !Server.isRpcInvocation() || Server.getCallId() < 0
|| Arrays.equals(Server.getClientId(), RpcConstants.DUMMY_CLIENT_ID);
}
public void lock() {
this.lock.lock();
}
public void unlock() {
this.lock.unlock();
}
private void incrCacheClearedCounter() {
retryCacheMetrics.incrCacheCleared();
}
@VisibleForTesting
public LightWeightGSet<CacheEntry, CacheEntry> getCacheSet() {
return set;
}
@VisibleForTesting
public RetryCacheMetrics getMetricsForTests() {
return retryCacheMetrics;
}
/**
* Returns the cache name, used for metrics.
*/
public String getCacheName() {
return cacheName;
}
/**
* This method handles the following conditions:
* <ul>
* <li>If retry is not to be processed, return null</li>
* <li>If there is no cache entry, add a new entry {@code newEntry} and return
* it.</li>
* <li>If there is an existing entry, wait for its completion. If the
* completion state is {@link CacheEntry#FAILED}, the expectation is that the
* thread that waited for completion retries the request; the
* {@link CacheEntry} state is set to {@link CacheEntry#INPROGRESS} again.</li>
* <li>If the completion state is {@link CacheEntry#SUCCESS}, the entry is
* returned so that the thread that waits for it can return the previous
* response.</li>
* </ul>
*
* @return {@link CacheEntry}.
*/
private CacheEntry waitForCompletion(CacheEntry newEntry) {
CacheEntry mapEntry = null;
lock.lock();
try {
mapEntry = set.get(newEntry);
// If an entry in the cache does not exist, add a new one
if (mapEntry == null) {
if (LOG.isTraceEnabled()) {
LOG.trace("Adding Rpc request clientId "
+ newEntry.clientIdMsb + newEntry.clientIdLsb + " callId "
+ newEntry.callId + " to retryCache");
}
set.put(newEntry);
retryCacheMetrics.incrCacheUpdated();
return newEntry;
} else {
retryCacheMetrics.incrCacheHit();
}
} finally {
lock.unlock();
}
// Entry already exists in cache. Wait for completion and return its state
Preconditions.checkNotNull(mapEntry,
"Entry from the cache should not be null");
// Wait for in progress request to complete
synchronized (mapEntry) {
while (mapEntry.state == CacheEntry.INPROGRESS) {
try {
mapEntry.wait();
} catch (InterruptedException ie) {
// Restore the interrupted status
Thread.currentThread().interrupt();
}
}
// Previous request has failed, the expectation is that it will be
// retried again.
if (mapEntry.state != CacheEntry.SUCCESS) {
mapEntry.state = CacheEntry.INPROGRESS;
}
}
return mapEntry;
}
/**
* Add a new cache entry into the retry cache. The cache entry consists of
* clientId and callId extracted from editlog.
*/
public void addCacheEntry(byte[] clientId, int callId) {
CacheEntry newEntry = new CacheEntry(clientId, callId, System.nanoTime()
+ expirationTime, true);
lock.lock();
try {
set.put(newEntry);
} finally {
lock.unlock();
}
retryCacheMetrics.incrCacheUpdated();
}
public void addCacheEntryWithPayload(byte[] clientId, int callId,
Object payload) {
// since the entry is loaded from editlog, we can assume it succeeded.
CacheEntry newEntry = new CacheEntryWithPayload(clientId, callId, payload,
System.nanoTime() + expirationTime, true);
lock.lock();
try {
set.put(newEntry);
} finally {
lock.unlock();
}
retryCacheMetrics.incrCacheUpdated();
}
private static CacheEntry newEntry(long expirationTime) {
return new CacheEntry(Server.getClientId(), Server.getCallId(),
System.nanoTime() + expirationTime);
}
private static CacheEntryWithPayload newEntry(Object payload,
long expirationTime) {
return new CacheEntryWithPayload(Server.getClientId(), Server.getCallId(),
payload, System.nanoTime() + expirationTime);
}
/** Static method that provides null check for retryCache */
public static CacheEntry waitForCompletion(RetryCache cache) {
if (skipRetryCache()) {
return null;
}
return cache != null ? cache
.waitForCompletion(newEntry(cache.expirationTime)) : null;
}
/** Static method that provides null check for retryCache */
public static CacheEntryWithPayload waitForCompletion(RetryCache cache,
Object payload) {
if (skipRetryCache()) {
return null;
}
return (CacheEntryWithPayload) (cache != null ? cache
.waitForCompletion(newEntry(payload, cache.expirationTime)) : null);
}
public static void setState(CacheEntry e, boolean success) {
if (e == null) {
return;
}
e.completed(success);
}
public static void setState(CacheEntryWithPayload e, boolean success,
Object payload) {
if (e == null) {
return;
}
e.payload = payload;
e.completed(success);
}
public static void clear(RetryCache cache) {
if (cache != null) {
cache.set.clear();
cache.incrCacheClearedCounter();
}
}
}
| 12,060 | 30.408854 | 80 |
java
|
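The pattern below is a minimal sketch of how an RPC handler for a non-idempotent operation might use the static helpers above. The handler body, the retryCache field, previousResponse, and doNonIdempotentWork() are all hypothetical; it assumes the code runs inside an RPC server call so that Server.getClientId() and Server.getCallId() are populated.

// Hypothetical server-side handler (sketch, not part of RetryCache itself).
RetryCache.CacheEntry entry = RetryCache.waitForCompletion(retryCache);
if (entry != null && entry.isSuccess()) {
  // This is a retry of an already-completed request: replay the old result.
  return previousResponse;
}
boolean success = false;
try {
  doNonIdempotentWork(); // hypothetical operation being protected
  success = true;
} finally {
  // Record the outcome and wake up any retries blocked in waitForCompletion.
  RetryCache.setState(entry, success);
}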
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.nio.ByteBuffer;
import java.util.UUID;
import org.apache.hadoop.classification.InterfaceAudience;
import com.google.common.base.Preconditions;
/**
* A class defining a set of static helper methods to provide conversion between
* bytes and string for UUID-based client Id.
*/
@InterfaceAudience.Private
public class ClientId {
/** The byte array of a UUID should be 16 bytes */
public static final int BYTE_LENGTH = 16;
private static final int shiftWidth = 8;
/**
* Return clientId as byte[]
*/
public static byte[] getClientId() {
UUID uuid = UUID.randomUUID();
ByteBuffer buf = ByteBuffer.wrap(new byte[BYTE_LENGTH]);
buf.putLong(uuid.getMostSignificantBits());
buf.putLong(uuid.getLeastSignificantBits());
return buf.array();
}
/** Convert a clientId byte[] to string */
public static String toString(byte[] clientId) {
// clientId can be null or an empty array
if (clientId == null || clientId.length == 0) {
return "";
}
// otherwise should be 16 bytes
Preconditions.checkArgument(clientId.length == BYTE_LENGTH);
long msb = getMsb(clientId);
long lsb = getLsb(clientId);
return (new UUID(msb, lsb)).toString();
}
public static long getMsb(byte[] clientId) {
long msb = 0;
for (int i = 0; i < BYTE_LENGTH/2; i++) {
msb = (msb << shiftWidth) | (clientId[i] & 0xff);
}
return msb;
}
public static long getLsb(byte[] clientId) {
long lsb = 0;
for (int i = BYTE_LENGTH/2; i < BYTE_LENGTH; i++) {
lsb = (lsb << shiftWidth) | (clientId[i] & 0xff);
}
return lsb;
}
/** Convert a clientId string to its byte[] representation */
public static byte[] toBytes(String id) {
if (id == null || "".equals(id)) {
return new byte[0];
}
UUID uuid = UUID.fromString(id);
ByteBuffer buf = ByteBuffer.wrap(new byte[BYTE_LENGTH]);
buf.putLong(uuid.getMostSignificantBits());
buf.putLong(uuid.getLeastSignificantBits());
return buf.array();
}
}
| 2,880 | 30.659341 | 80 |
java
|
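As a quick illustration of the helpers above, the byte[] and String forms round-trip losslessly, and the msb/lsb halves reassemble into the same UUID (a sketch; the generated ID is random on every call):

byte[] id = ClientId.getClientId();   // 16 UUID bytes
String text = ClientId.toString(id);  // canonical UUID string
byte[] back = ClientId.toBytes(text); // same 16 bytes again
assert java.util.Arrays.equals(id, back);
long msb = ClientId.getMsb(id);       // the two halves RetryCache stores
long lsb = ClientId.getLsb(id);
assert text.equals(new java.util.UUID(msb, lsb).toString());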
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/IpcException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
/**
* IPC exception is thrown by IPC layer when the IPC
* connection cannot be established.
*/
public class IpcException extends IOException {
private static final long serialVersionUID = 1L;
final String errMsg;
public IpcException(final String err) {
super(err); // preserve the message so getMessage() is not null
errMsg = err;
}
}
| 1,159 | 32.142857 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcNoSuchMethodException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
/**
* Thrown when no such method exists for an RPC call.
*
*/
public class RpcNoSuchMethodException extends RpcServerException {
private static final long serialVersionUID = 1L;
public RpcNoSuchMethodException(final String message) {
super(message);
}
/**
* Get the RPC status corresponding to this exception.
*/
@Override
public RpcStatusProto getRpcStatusProto() {
return RpcStatusProto.ERROR;
}
/**
* Get the detailed RPC status corresponding to this exception.
*/
@Override
public RpcErrorCodeProto getRpcErrorCodeProto() {
return RpcErrorCodeProto.ERROR_NO_SUCH_METHOD;
}
}
| 1,616 | 32.6875 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WeightedRoundRobinMultiplexer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
/**
* Determines which queue to start reading from, occasionally drawing from
* low-priority queues in order to prevent starvation. Given the pull pattern
* [9, 4, 1] for 3 queues:
*
* The cycle is (a minimum of) 9+4+1=14 reads.
* Queue 0 is read (at least) 9 times
* Queue 1 is read (at least) 4 times
* Queue 2 is read (at least) 1 time
* Repeat
*
* There may be more reads than the minimum due to race conditions. This is
* allowed by design for performance reasons.
*/
public class WeightedRoundRobinMultiplexer implements RpcMultiplexer {
// Config keys
public static final String IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY =
"faircallqueue.multiplexer.weights";
public static final Log LOG =
LogFactory.getLog(WeightedRoundRobinMultiplexer.class);
private final int numQueues; // The number of queues under our provisioning
private final AtomicInteger currentQueueIndex; // Current queue we're serving
private final AtomicInteger requestsLeft; // Number of requests left for this queue
private int[] queueWeights; // The weights for each queue
public WeightedRoundRobinMultiplexer(int aNumQueues, String ns,
Configuration conf) {
if (aNumQueues <= 0) {
throw new IllegalArgumentException("Requested queues (" + aNumQueues +
") must be greater than zero.");
}
this.numQueues = aNumQueues;
this.queueWeights = conf.getInts(ns + "." +
IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY);
if (this.queueWeights.length == 0) {
this.queueWeights = getDefaultQueueWeights(this.numQueues);
} else if (this.queueWeights.length != this.numQueues) {
throw new IllegalArgumentException(ns + "." +
IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY + " must specify exactly " +
this.numQueues + " weights: one for each priority level.");
}
this.currentQueueIndex = new AtomicInteger(0);
this.requestsLeft = new AtomicInteger(this.queueWeights[0]);
LOG.info("WeightedRoundRobinMultiplexer is being used.");
}
/**
* Creates default weights for each queue. The weights are powers of two:
* queue 0 (highest priority) gets 2^(numQueues - 1), halving down to a
* weight of 1 for the lowest-priority queue.
*/
private int[] getDefaultQueueWeights(int aNumQueues) {
int[] weights = new int[aNumQueues];
int weight = 1; // Start low
for(int i = aNumQueues - 1; i >= 0; i--) { // Start at lowest queue
weights[i] = weight;
weight *= 2; // Double every iteration
}
return weights;
}
/**
* Move to the next queue.
*/
private void moveToNextQueue() {
int thisIdx = this.currentQueueIndex.get();
// Wrap to fit in our bounds
int nextIdx = (thisIdx + 1) % this.numQueues;
// Set to next index: once this is called, requests will start being
// drawn from nextIdx, but requestsLeft will continue to decrement into
// the negatives
this.currentQueueIndex.set(nextIdx);
// Finally, reset requestsLeft. This will enable moveToNextQueue to be
// called again, for the new currentQueueIndex
this.requestsLeft.set(this.queueWeights[nextIdx]);
}
/**
* Advances the index, which will change the current index
* if called enough times.
*/
private void advanceIndex() {
// Since we did read, we should decrement
int requestsLeftVal = this.requestsLeft.decrementAndGet();
// Strict compare with zero (instead of inequality) so that if another
// thread decrements requestsLeft, only one thread will be responsible
// for advancing currentQueueIndex
if (requestsLeftVal == 0) {
// This is guaranteed to be called exactly once per currentQueueIndex
this.moveToNextQueue();
}
}
/**
* Gets the current index. Should be accompanied by a call to
* advanceIndex at some point.
*/
private int getCurrentIndex() {
return this.currentQueueIndex.get();
}
/**
* Use the mux by getting and advancing index.
*/
public int getAndAdvanceCurrentIndex() {
int idx = this.getCurrentIndex();
this.advanceIndex();
return idx;
}
}
| 4,971 | 32.369128 | 85 |
java
|
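A sketch of configuring the [9, 4, 1] pull pattern from the class javadoc; the "ipc.8020" namespace is a hypothetical example, and the weights key is composed as ns + "." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY, exactly as the constructor reads it:

Configuration conf = new Configuration();
conf.set("ipc.8020." +
    WeightedRoundRobinMultiplexer.IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY, "9,4,1");
WeightedRoundRobinMultiplexer mux =
    new WeightedRoundRobinMultiplexer(3, "ipc.8020", conf);
// One full cycle of 9 + 4 + 1 = 14 reads: index 0 nine times, then
// index 1 four times, then index 2 once (absent racing threads).
for (int i = 0; i < 14; i++) {
  int queueIdx = mux.getAndAdvanceCurrentIndex();
}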
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.AbstractQueue;
import java.util.HashMap;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.util.MBeans;
/**
* A queue with multiple sub-queues, one for each priority level.
*/
public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
implements BlockingQueue<E> {
// Configuration Keys
public static final int IPC_CALLQUEUE_PRIORITY_LEVELS_DEFAULT = 4;
public static final String IPC_CALLQUEUE_PRIORITY_LEVELS_KEY =
"faircallqueue.priority-levels";
public static final Log LOG = LogFactory.getLog(FairCallQueue.class);
/* The queues */
private final ArrayList<BlockingQueue<E>> queues;
/* Read locks */
private final ReentrantLock takeLock = new ReentrantLock();
private final Condition notEmpty = takeLock.newCondition();
private void signalNotEmpty() {
takeLock.lock();
try {
notEmpty.signal();
} finally {
takeLock.unlock();
}
}
/* Scheduler picks which queue to place in */
private RpcScheduler scheduler;
/* Multiplexer picks which queue to draw from */
private RpcMultiplexer multiplexer;
/* Statistic tracking */
private final ArrayList<AtomicLong> overflowedCalls;
/**
* Create a FairCallQueue.
* @param capacity the maximum size of each sub-queue
* @param ns the prefix to use for configuration
* @param conf the configuration to read from
* Notes: the FairCallQueue has no fixed capacity. Rather, it has a minimum
* capacity of `capacity` and a maximum capacity of `capacity * number_queues`
*/
public FairCallQueue(int capacity, String ns, Configuration conf) {
int numQueues = parseNumQueues(ns, conf);
LOG.info("FairCallQueue is in use with " + numQueues + " queues.");
this.queues = new ArrayList<BlockingQueue<E>>(numQueues);
this.overflowedCalls = new ArrayList<AtomicLong>(numQueues);
for(int i=0; i < numQueues; i++) {
this.queues.add(new LinkedBlockingQueue<E>(capacity));
this.overflowedCalls.add(new AtomicLong(0));
}
this.scheduler = new DecayRpcScheduler(numQueues, ns, conf);
this.multiplexer = new WeightedRoundRobinMultiplexer(numQueues, ns, conf);
// Make this the active source of metrics
MetricsProxy mp = MetricsProxy.getInstance(ns);
mp.setDelegate(this);
}
/**
* Read the number of queues from the configuration.
* This will affect the FairCallQueue's overall capacity.
* @throws IllegalArgumentException on invalid queue count
*/
private static int parseNumQueues(String ns, Configuration conf) {
int retval = conf.getInt(ns + "." + IPC_CALLQUEUE_PRIORITY_LEVELS_KEY,
IPC_CALLQUEUE_PRIORITY_LEVELS_DEFAULT);
if(retval < 1) {
throw new IllegalArgumentException("numQueues must be at least 1");
}
return retval;
}
/**
* Returns the first non-empty queue with equal or lesser priority
* than <i>startIdx</i>. Wraps around, searching a maximum of N
* queues, where N is this.queues.size().
*
* @param startIdx the queue number to start searching at
* @return the first non-empty queue with less priority, or null if
* everything was empty
*/
private BlockingQueue<E> getFirstNonEmptyQueue(int startIdx) {
final int numQueues = this.queues.size();
for(int i=0; i < numQueues; i++) {
int idx = (i + startIdx) % numQueues; // offset and wrap around
BlockingQueue<E> queue = this.queues.get(idx);
if (queue.size() != 0) {
return queue;
}
}
// All queues were empty
return null;
}
/* AbstractQueue and BlockingQueue methods */
/**
* Put and offer follow the same pattern:
* 1. Get a priorityLevel from the scheduler
* 2. Get the nth sub-queue matching this priorityLevel
* 3. delegate the call to this sub-queue.
*
* But differ in how they handle overflow:
* - Put will move on to the next queue until it lands on the last queue
* - Offer does not attempt other queues on overflow
*/
@Override
public void put(E e) throws InterruptedException {
int priorityLevel = scheduler.getPriorityLevel(e);
final int numLevels = this.queues.size();
while (true) {
BlockingQueue<E> q = this.queues.get(priorityLevel);
boolean res = q.offer(e);
if (!res) {
// Update stats
this.overflowedCalls.get(priorityLevel).getAndIncrement();
// If we failed to insert, try again on the next level
priorityLevel++;
if (priorityLevel == numLevels) {
// That was the last one, we will block on put in the last queue
// Delete this line to drop the call
this.queues.get(priorityLevel-1).put(e);
break;
}
} else {
break;
}
}
signalNotEmpty();
}
@Override
public boolean offer(E e, long timeout, TimeUnit unit)
throws InterruptedException {
int priorityLevel = scheduler.getPriorityLevel(e);
BlockingQueue<E> q = this.queues.get(priorityLevel);
boolean ret = q.offer(e, timeout, unit);
signalNotEmpty();
return ret;
}
@Override
public boolean offer(E e) {
int priorityLevel = scheduler.getPriorityLevel(e);
BlockingQueue<E> q = this.queues.get(priorityLevel);
boolean ret = q.offer(e);
signalNotEmpty();
return ret;
}
@Override
public E take() throws InterruptedException {
int startIdx = this.multiplexer.getAndAdvanceCurrentIndex();
takeLock.lockInterruptibly();
try {
// Wait while queue is empty
for (;;) {
BlockingQueue<E> q = this.getFirstNonEmptyQueue(startIdx);
if (q != null) {
// Got queue, so return if we can poll out an object
E e = q.poll();
if (e != null) {
return e;
}
}
notEmpty.await();
}
} finally {
takeLock.unlock();
}
}
@Override
public E poll(long timeout, TimeUnit unit)
throws InterruptedException {
int startIdx = this.multiplexer.getAndAdvanceCurrentIndex();
long nanos = unit.toNanos(timeout);
takeLock.lockInterruptibly();
try {
for (;;) {
BlockingQueue<E> q = this.getFirstNonEmptyQueue(startIdx);
if (q != null) {
E e = q.poll();
if (e != null) {
// Escape condition: there might be something available
return e;
}
}
if (nanos <= 0) {
// Wait has elapsed
return null;
}
try {
// Now wait on the condition for a bit. If we get
// spuriously awoken we'll re-loop
nanos = notEmpty.awaitNanos(nanos);
} catch (InterruptedException ie) {
notEmpty.signal(); // propagate to a non-interrupted thread
throw ie;
}
}
} finally {
takeLock.unlock();
}
}
/**
* poll() provides no strict consistency: it is possible for poll to return
* null even though an element is in the queue.
*/
@Override
public E poll() {
int startIdx = this.multiplexer.getAndAdvanceCurrentIndex();
BlockingQueue<E> q = this.getFirstNonEmptyQueue(startIdx);
if (q == null) {
return null; // everything is empty
}
// Delegate to the sub-queue's poll, which could still return null
return q.poll();
}
/**
* Peek, like poll, provides no strict consistency.
*/
@Override
public E peek() {
BlockingQueue<E> q = this.getFirstNonEmptyQueue(0);
if (q == null) {
return null;
} else {
return q.peek();
}
}
/**
* Size returns the sum of all sub-queue sizes, so it may be greater than
* capacity.
* Note: size provides no strict consistency, and should not be used to
* control queue IO.
*/
@Override
public int size() {
int size = 0;
for (BlockingQueue q : this.queues) {
size += q.size();
}
return size;
}
/**
* Iterator is not implemented, as it is not needed.
*/
@Override
public Iterator<E> iterator() {
throw new NotImplementedException();
}
/**
* drainTo defers to each sub-queue. Note that draining from a FairCallQueue
* to another FairCallQueue will likely fail, since the incoming calls
* may be scheduled differently in the new FairCallQueue. Nonetheless this
* method is provided for completeness.
*/
@Override
public int drainTo(Collection<? super E> c, int maxElements) {
int sum = 0;
for (BlockingQueue<E> q : this.queues) {
sum += q.drainTo(c, maxElements);
}
return sum;
}
@Override
public int drainTo(Collection<? super E> c) {
int sum = 0;
for (BlockingQueue<E> q : this.queues) {
sum += q.drainTo(c);
}
return sum;
}
/**
* Returns maximum remaining capacity. This does not reflect how much you can
* ideally fit in this FairCallQueue, as that would depend on the scheduler's
* decisions.
*/
@Override
public int remainingCapacity() {
int sum = 0;
for (BlockingQueue q : this.queues) {
sum += q.remainingCapacity();
}
return sum;
}
/**
* MetricsProxy is a singleton because we may init multiple
* FairCallQueues, but the metrics system cannot unregister beans cleanly.
*/
private static final class MetricsProxy implements FairCallQueueMXBean {
// One singleton per namespace
private static final HashMap<String, MetricsProxy> INSTANCES =
new HashMap<String, MetricsProxy>();
// Weakref for delegate, so we don't retain it forever if it can be GC'd
private WeakReference<FairCallQueue> delegate;
// Keep track of how many objects we registered
private int revisionNumber = 0;
private MetricsProxy(String namespace) {
MBeans.register(namespace, "FairCallQueue", this);
}
public static synchronized MetricsProxy getInstance(String namespace) {
MetricsProxy mp = INSTANCES.get(namespace);
if (mp == null) {
// We must create one
mp = new MetricsProxy(namespace);
INSTANCES.put(namespace, mp);
}
return mp;
}
public void setDelegate(FairCallQueue obj) {
this.delegate = new WeakReference<FairCallQueue>(obj);
this.revisionNumber++;
}
@Override
public int[] getQueueSizes() {
FairCallQueue obj = this.delegate.get();
if (obj == null) {
return new int[]{};
}
return obj.getQueueSizes();
}
@Override
public long[] getOverflowedCalls() {
FairCallQueue obj = this.delegate.get();
if (obj == null) {
return new long[]{};
}
return obj.getOverflowedCalls();
}
@Override public int getRevision() {
return revisionNumber;
}
}
// FairCallQueueMXBean
public int[] getQueueSizes() {
int numQueues = queues.size();
int[] sizes = new int[numQueues];
for (int i=0; i < numQueues; i++) {
sizes[i] = queues.get(i).size();
}
return sizes;
}
public long[] getOverflowedCalls() {
int numQueues = queues.size();
long[] calls = new long[numQueues];
for (int i=0; i < numQueues; i++) {
calls[i] = overflowedCalls.get(i).get();
}
return calls;
}
// For testing
@VisibleForTesting
public void setScheduler(RpcScheduler newScheduler) {
this.scheduler = newScheduler;
}
@VisibleForTesting
public void setMultiplexer(RpcMultiplexer newMux) {
this.multiplexer = newMux;
}
}
| 12,863 | 27.586667 | 80 |
java
|
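For clarity, the wrap-around search performed by getFirstNonEmptyQueue can be restated as the standalone sketch below (a hypothetical helper, not part of the class): scan at most N sub-queues starting at startIdx, wrapping modulo N.

static <E> java.util.Queue<E> firstNonEmpty(
    java.util.List<? extends java.util.Queue<E>> queues, int startIdx) {
  int n = queues.size();
  for (int i = 0; i < n; i++) {
    java.util.Queue<E> q = queues.get((startIdx + i) % n); // offset and wrap
    if (!q.isEmpty()) {
      return q; // first non-empty queue at or after startIdx
    }
  }
  return null; // all queues were empty
}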
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.HashMap;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import com.google.common.annotations.VisibleForTesting;
public class ProtocolSignature implements Writable {
static { // register a ctor
WritableFactories.setFactory
(ProtocolSignature.class,
new WritableFactory() {
@Override
public Writable newInstance() { return new ProtocolSignature(); }
});
}
private long version;
private int[] methods = null; // an array of method hash codes
/**
* default constructor
*/
public ProtocolSignature() {
}
/**
* Constructor
*
* @param version server version
* @param methodHashcodes hash codes of the methods supported by server
*/
public ProtocolSignature(long version, int[] methodHashcodes) {
this.version = version;
this.methods = methodHashcodes;
}
public long getVersion() {
return version;
}
public int[] getMethods() {
return methods;
}
@Override
public void readFields(DataInput in) throws IOException {
version = in.readLong();
boolean hasMethods = in.readBoolean();
if (hasMethods) {
int numMethods = in.readInt();
methods = new int[numMethods];
for (int i=0; i<numMethods; i++) {
methods[i] = in.readInt();
}
}
}
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(version);
if (methods == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeInt(methods.length);
for (int method : methods) {
out.writeInt(method);
}
}
}
/**
* Calculate a method's hash code considering its method
* name, return type, and parameter types
*
* @param method a method
* @return its hash code
*/
static int getFingerprint(Method method) {
int hashcode = method.getName().hashCode();
hashcode = hashcode + 31*method.getReturnType().getName().hashCode();
for (Class<?> type : method.getParameterTypes()) {
hashcode = 31*hashcode ^ type.getName().hashCode();
}
return hashcode;
}
/**
* Convert an array of Method into an array of hash codes
*
* @param methods
* @return array of hash codes
*/
private static int[] getFingerprints(Method[] methods) {
if (methods == null) {
return null;
}
int[] hashCodes = new int[methods.length];
for (int i = 0; i<methods.length; i++) {
hashCodes[i] = getFingerprint(methods[i]);
}
return hashCodes;
}
/**
* Get the hash code of an array of methods
* The method hash codes are sorted before the final hash is calculated,
* so the returned value is independent of the method order in the array.
*
* @param methods an array of methods
* @return the hash code
*/
static int getFingerprint(Method[] methods) {
return getFingerprint(getFingerprints(methods));
}
/**
* Get the hash code of an array of hash codes.
* The hash codes are sorted (in place) before the final hash is calculated,
* so the returned value is independent of their order in the array.
*
* @param hashcodes an array of method hash codes
* @return the hash code
*/
static int getFingerprint(int[] hashcodes) {
Arrays.sort(hashcodes);
return Arrays.hashCode(hashcodes);
}
private static class ProtocolSigFingerprint {
private ProtocolSignature signature;
private int fingerprint;
ProtocolSigFingerprint(ProtocolSignature sig, int fingerprint) {
this.signature = sig;
this.fingerprint = fingerprint;
}
}
/**
* A cache that maps a protocol's name to its signature & finger print
*/
private final static HashMap<String, ProtocolSigFingerprint>
PROTOCOL_FINGERPRINT_CACHE =
new HashMap<String, ProtocolSigFingerprint>();
@VisibleForTesting
public static void resetCache() {
PROTOCOL_FINGERPRINT_CACHE.clear();
}
/**
* Return a protocol's signature and finger print from cache
*
* @param protocol a protocol class
* @param serverVersion protocol version
* @return its signature and finger print
*/
private static ProtocolSigFingerprint getSigFingerprint(
Class <?> protocol, long serverVersion) {
String protocolName = RPC.getProtocolName(protocol);
synchronized (PROTOCOL_FINGERPRINT_CACHE) {
ProtocolSigFingerprint sig = PROTOCOL_FINGERPRINT_CACHE.get(protocolName);
if (sig == null) {
int[] serverMethodHashcodes = getFingerprints(protocol.getMethods());
sig = new ProtocolSigFingerprint(
new ProtocolSignature(serverVersion, serverMethodHashcodes),
getFingerprint(serverMethodHashcodes));
PROTOCOL_FINGERPRINT_CACHE.put(protocolName, sig);
}
return sig;
}
}
/**
* Get a server protocol's signature
*
* @param clientMethodsHashCode client protocol methods hashcode
* @param serverVersion server protocol version
* @param protocol protocol
* @return the server's protocol signature
*/
public static ProtocolSignature getProtocolSignature(
int clientMethodsHashCode,
long serverVersion,
Class<? extends VersionedProtocol> protocol) {
// try to get the finger print & signature from the cache
ProtocolSigFingerprint sig = getSigFingerprint(protocol, serverVersion);
// check if the client side protocol matches the one on the server side
if (clientMethodsHashCode == sig.fingerprint) {
return new ProtocolSignature(serverVersion, null); // null indicates a match
}
return sig.signature;
}
public static ProtocolSignature getProtocolSignature(String protocolName,
long version) throws ClassNotFoundException {
Class<?> protocol = Class.forName(protocolName);
return getSigFingerprint(protocol, version).signature;
}
/**
* Get a server protocol's signature
*
* @param server server implementation
* @param protocol server protocol
* @param clientVersion client's version
* @param clientMethodsHash client's protocol's hash code
* @return the server protocol's signature
* @throws IOException if any error occurs
*/
@SuppressWarnings("unchecked")
public static ProtocolSignature getProtocolSignature(VersionedProtocol server,
String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
Class<? extends VersionedProtocol> inter;
try {
inter = (Class<? extends VersionedProtocol>)Class.forName(protocol);
} catch (Exception e) {
throw new IOException(e);
}
long serverVersion = server.getProtocolVersion(protocol, clientVersion);
return ProtocolSignature.getProtocolSignature(
clientMethodsHash, serverVersion, inter);
}
}
| 7,846 | 29.652344 | 83 |
java
|
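One property worth noting: getFingerprint(int[]) sorts its argument before hashing, so fingerprints are order-independent but the input array is mutated. A sketch (assumes the caller lives in the org.apache.hadoop.ipc package, since the method is package-private):

int[] a = {3, 1, 2};
int[] b = {1, 2, 3};
// Both calls sort their input and hash it, so the fingerprints are equal.
assert ProtocolSignature.getFingerprint(a) == ProtocolSignature.getFingerprint(b);
// Side effect: a has been sorted in place and is now {1, 2, 3}.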
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueueMXBean.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
public interface FairCallQueueMXBean {
// Get the size of each sub-queue, the index corresponding to the priority
// level.
int[] getQueueSizes();
long[] getOverflowedCalls();
int getRevision();
}
| 1,045 | 37.740741 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcScheduler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
/**
* Implement this interface to be used for RPC scheduling in the fair call queues.
*/
public interface RpcScheduler {
/**
* Returns a priority level greater than zero as a hint for scheduling.
*/
int getPriorityLevel(Schedulable obj);
}
| 1,090 | 35.366667 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/StandbyException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown by a remote server when it is up, but is not the active server in a
* set of servers in which only a subset may be active.
*/
@InterfaceStability.Evolving
public class StandbyException extends IOException {
static final long serialVersionUID = 0x12308AD010L;
public StandbyException(String msg) {
super(msg);
}
}
| 1,268 | 35.257143 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshCallQueueProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol used to refresh the call queue currently in use.
*/
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Evolving
public interface RefreshCallQueueProtocol {
/**
* Version 1: Initial version
*/
public static final long versionID = 1L;
/**
* Refresh the callqueue.
* @throws IOException
*/
@Idempotent
void refreshCallQueue() throws IOException;
}
| 1,646 | 32.612245 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInterface.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This interface is implemented by the client-side translators and can be used
* to obtain information about the underlying protocol, e.g. to check whether a
* method is supported on the server side.
*/
@InterfaceAudience.Private
@InterfaceStability.Stable
public interface ProtocolMetaInterface {
/**
* Checks whether the given method name is supported by the server.
* It is assumed that all method names are unique for a protocol.
* @param methodName The name of the method
* @return true if method is supported, otherwise false.
* @throws IOException
*/
public boolean isMethodSupported(String methodName) throws IOException;
}
| 1,643 | 37.232558 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.nio.ByteBuffer;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public class RpcConstants {
private RpcConstants() {
// Hidden Constructor
}
public static final int AUTHORIZATION_FAILED_CALL_ID = -1;
public static final int INVALID_CALL_ID = -2;
public static final int CONNECTION_CONTEXT_CALL_ID = -3;
public static final int PING_CALL_ID = -4;
public static final byte[] DUMMY_CLIENT_ID = new byte[0];
public static final int INVALID_RETRY_COUNT = -1;
/**
* The first four bytes of Hadoop RPC connections
*/
public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes
(Charsets.UTF_8));
// 1 : Introduce ping and server does not throw away RPCs
// 3 : Introduce the protocol into the RPC connection header
// 4 : Introduced SASL security layer
// 5 : Introduced use of {@link ArrayPrimitiveWritable$Internal}
// in ObjectWritable to efficiently transmit arrays of primitives
// 6 : Made RPC Request header explicit
// 7 : Changed Ipc Connection Header to use Protocol buffers
// 8 : SASL server always sends a final response
// 9 : Changes to protocol for HADOOP-8990
public static final byte CURRENT_VERSION = 9;
}
| 2,135 | 36.473684 | 75 |
java
|
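A sketch of validating a connection preamble against the constants above; readFirstFourBytes is a hypothetical helper standing in for whatever reads the socket:

byte[] preamble = readFirstFourBytes(socket); // hypothetical
boolean isHadoopRpc = java.util.Arrays.equals(preamble,
    "hrpc".getBytes(org.apache.commons.io.Charsets.UTF_8));
// The connection header also carries a version byte, which the server
// checks against CURRENT_VERSION (9).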
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRequestProto;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureResponseProto;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto;
import org.apache.hadoop.net.NetUtils;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
* This class maintains a cache of protocol versions and corresponding protocol
* signatures, keyed by server address, protocol and rpc kind.
* The cache is lazily populated.
*/
public class RpcClientUtil {
private static RpcController NULL_CONTROLLER = null;
private static final int PRIME = 16777619;
private static class ProtoSigCacheKey {
private InetSocketAddress serverAddress;
private String protocol;
private String rpcKind;
ProtoSigCacheKey(InetSocketAddress addr, String p, String rk) {
this.serverAddress = addr;
this.protocol = p;
this.rpcKind = rk;
}
@Override //Object
public int hashCode() {
int result = 1;
result = PRIME * result
+ ((serverAddress == null) ? 0 : serverAddress.hashCode());
result = PRIME * result + ((protocol == null) ? 0 : protocol.hashCode());
result = PRIME * result + ((rpcKind == null) ? 0 : rpcKind.hashCode());
return result;
}
@Override //Object
public boolean equals(Object other) {
if (other == this) {
return true;
}
if (other instanceof ProtoSigCacheKey) {
ProtoSigCacheKey otherKey = (ProtoSigCacheKey) other;
return (serverAddress.equals(otherKey.serverAddress) &&
protocol.equals(otherKey.protocol) &&
rpcKind.equals(otherKey.rpcKind));
}
return false;
}
}
private static ConcurrentHashMap<ProtoSigCacheKey, Map<Long, ProtocolSignature>>
signatureMap = new ConcurrentHashMap<ProtoSigCacheKey, Map<Long, ProtocolSignature>>();
private static void putVersionSignatureMap(InetSocketAddress addr,
String protocol, String rpcKind, Map<Long, ProtocolSignature> map) {
signatureMap.put(new ProtoSigCacheKey(addr, protocol, rpcKind), map);
}
private static Map<Long, ProtocolSignature> getVersionSignatureMap(
InetSocketAddress addr, String protocol, String rpcKind) {
return signatureMap.get(new ProtoSigCacheKey(addr, protocol, rpcKind));
}
/**
* Returns whether the given method is supported or not.
* The protocol signatures are fetched and cached. The connection id for the
* proxy provided is re-used.
* @param rpcProxy Proxy which provides an existing connection id.
* @param protocol Protocol for which the method check is required.
* @param rpcKind The RpcKind for which the method check is required.
* @param version The version at the client.
* @param methodName Name of the method.
* @return true if the method is supported, false otherwise.
* @throws IOException
*/
public static boolean isMethodSupported(Object rpcProxy, Class<?> protocol,
RPC.RpcKind rpcKind, long version, String methodName) throws IOException {
InetSocketAddress serverAddress = RPC.getServerAddress(rpcProxy);
Map<Long, ProtocolSignature> versionMap = getVersionSignatureMap(
serverAddress, protocol.getName(), rpcKind.toString());
if (versionMap == null) {
Configuration conf = new Configuration();
RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class,
ProtobufRpcEngine.class);
ProtocolMetaInfoPB protocolInfoProxy = getProtocolMetaInfoProxy(rpcProxy,
conf);
GetProtocolSignatureRequestProto.Builder builder =
GetProtocolSignatureRequestProto.newBuilder();
builder.setProtocol(protocol.getName());
builder.setRpcKind(rpcKind.toString());
GetProtocolSignatureResponseProto resp;
try {
resp = protocolInfoProxy.getProtocolSignature(NULL_CONTROLLER,
builder.build());
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
versionMap = convertProtocolSignatureProtos(resp
.getProtocolSignatureList());
putVersionSignatureMap(serverAddress, protocol.getName(),
rpcKind.toString(), versionMap);
}
// Assuming unique method names.
Method desiredMethod;
Method[] allMethods = protocol.getMethods();
desiredMethod = null;
for (Method m : allMethods) {
if (m.getName().equals(methodName)) {
desiredMethod = m;
break;
}
}
if (desiredMethod == null) {
return false;
}
int methodHash = ProtocolSignature.getFingerprint(desiredMethod);
return methodExists(methodHash, version, versionMap);
}
private static Map<Long, ProtocolSignature>
convertProtocolSignatureProtos(List<ProtocolSignatureProto> protoList) {
Map<Long, ProtocolSignature> map = new TreeMap<Long, ProtocolSignature>();
for (ProtocolSignatureProto p : protoList) {
int [] methods = new int[p.getMethodsList().size()];
int index=0;
for (int m : p.getMethodsList()) {
methods[index++] = m;
}
map.put(p.getVersion(), new ProtocolSignature(p.getVersion(), methods));
}
return map;
}
private static boolean methodExists(int methodHash, long version,
Map<Long, ProtocolSignature> versionMap) {
ProtocolSignature sig = versionMap.get(version);
if (sig != null) {
for (int m : sig.getMethods()) {
if (m == methodHash) {
return true;
}
}
}
return false;
}
// The proxy returned re-uses the underlying connection. This is a special
// mechanism for ProtocolMetaInfoPB.
// Don't do this for any other protocol, it might cause a security hole.
private static ProtocolMetaInfoPB getProtocolMetaInfoProxy(Object proxy,
Configuration conf) throws IOException {
RpcInvocationHandler inv = (RpcInvocationHandler) Proxy
.getInvocationHandler(proxy);
return RPC
.getProtocolEngine(ProtocolMetaInfoPB.class, conf)
.getProtocolMetaInfoProxy(inv.getConnectionId(), conf,
NetUtils.getDefaultSocketFactory(conf)).getProxy();
}
/**
* Convert an RPC method to a string.
* The format we want is 'MethodOuterClassShortName#methodName'.
*
* For example, if the method is:
* org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.
* ClientNamenodeProtocol.BlockingInterface.getServerDefaults
*
* the format we want is:
* ClientNamenodeProtocol#getServerDefaults
*/
public static String methodToTraceString(Method method) {
Class<?> clazz = method.getDeclaringClass();
while (true) {
Class<?> next = clazz.getEnclosingClass();
if (next == null || next.getEnclosingClass() == null) break;
clazz = next;
}
return clazz.getSimpleName() + "#" + method.getName();
}
/**
   * Convert a fully-qualified RPC class method name to a string.
   * The format we want is 'ClassShortName#methodName', i.e. the last two
   * dot-separated components.
   *
   * For example, if the full method name is:
* org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations
*
* the format we want is:
* ClientProtocol#getBlockLocations
*/
public static String toTraceName(String fullName) {
int lastPeriod = fullName.lastIndexOf('.');
if (lastPeriod < 0) {
return fullName;
}
int secondLastPeriod = fullName.lastIndexOf('.', lastPeriod - 1);
if (secondLastPeriod < 0) {
return fullName;
}
return fullName.substring(secondLastPeriod + 1, lastPeriod) + "#" +
fullName.substring(lastPeriod + 1);
}
}
| 8,845 | 36.168067 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcMultiplexer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
/**
* Implement this interface to make a pluggable multiplexer in the
* FairCallQueue.
*/
public interface RpcMultiplexer {
/**
* Should get current index and optionally perform whatever is needed
* to prepare the next index.
* @return current index
*/
int getAndAdvanceCurrentIndex();
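  // A minimal round-robin sketch of an implementation (illustrative only,
  // not the multiplexer FairCallQueue actually ships with; AtomicInteger
  // is java.util.concurrent.atomic.AtomicInteger):
  //
  //   class RoundRobinMultiplexer implements RpcMultiplexer {
  //     private final AtomicInteger index = new AtomicInteger();
  //     private final int numQueues;
  //     RoundRobinMultiplexer(int numQueues) { this.numQueues = numQueues; }
  //     @Override
  //     public int getAndAdvanceCurrentIndex() {
  //       return index.getAndIncrement() % numQueues;
  //     }
  //   }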
}
| 1,147 | 33.787879 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
/**
* Indicates an exception on the RPC server
*/
public class RpcServerException extends RpcException {
private static final long serialVersionUID = 1L;
/**
* Constructs exception with the specified detail message.
* @param message detailed message.
*/
public RpcServerException(final String message) {
super(message);
}
/**
* Constructs exception with the specified detail message and cause.
*
* @param message message.
 * @param cause the cause (can be retrieved by the {@link #getCause()} method).
* (A <tt>null</tt> value is permitted, and indicates that the cause
* is nonexistent or unknown.)
*/
public RpcServerException(final String message, final Throwable cause) {
super(message, cause);
}
/**
* get the rpc status corresponding to this exception
*/
public RpcStatusProto getRpcStatusProto() {
return RpcStatusProto.ERROR;
}
/**
* get the detailed rpc status corresponding to this exception
*/
public RpcErrorCodeProto getRpcErrorCodeProto() {
return RpcErrorCodeProto.ERROR_RPC_SERVER;
}
}
| 2,142 | 33.015873 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.lang.reflect.Proxy;
import java.lang.reflect.Method;
import java.lang.reflect.InvocationTargetException;
import java.net.InetSocketAddress;
import java.io.*;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.net.SocketFactory;
import org.apache.commons.logging.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.ipc.RPC.RpcInvoker;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.*;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
/** An RpcEngine implementation for Writable data. */
@InterfaceStability.Evolving
public class WritableRpcEngine implements RpcEngine {
private static final Log LOG = LogFactory.getLog(RPC.class);
//writableRpcVersion should be updated if there is a change
//in format of the rpc messages.
// 2L - added declared class to Invocation
public static final long writableRpcVersion = 2L;
/**
* Whether or not this class has been initialized.
*/
private static boolean isInitialized = false;
static {
ensureInitialized();
}
/**
* Initialize this class if it isn't already.
*/
public static synchronized void ensureInitialized() {
if (!isInitialized) {
initialize();
}
}
/**
* Register the rpcRequest deserializer for WritableRpcEngine
*/
private static synchronized void initialize() {
org.apache.hadoop.ipc.Server.registerProtocolEngine(RPC.RpcKind.RPC_WRITABLE,
Invocation.class, new Server.WritableRpcInvoker());
isInitialized = true;
}
/** A method invocation, including the method name and its parameters.*/
private static class Invocation implements Writable, Configurable {
private String methodName;
private Class<?>[] parameterClasses;
private Object[] parameters;
private Configuration conf;
private long clientVersion;
private int clientMethodsHash;
private String declaringClassProtocolName;
//This could be different from static writableRpcVersion when received
//at server, if client is using a different version.
private long rpcVersion;
@SuppressWarnings("unused") // called when deserializing an invocation
public Invocation() {}
public Invocation(Method method, Object[] parameters) {
this.methodName = method.getName();
this.parameterClasses = method.getParameterTypes();
this.parameters = parameters;
rpcVersion = writableRpcVersion;
if (method.getDeclaringClass().equals(VersionedProtocol.class)) {
//VersionedProtocol is exempted from version check.
clientVersion = 0;
clientMethodsHash = 0;
} else {
this.clientVersion = RPC.getProtocolVersion(method.getDeclaringClass());
this.clientMethodsHash = ProtocolSignature.getFingerprint(method
.getDeclaringClass().getMethods());
}
this.declaringClassProtocolName =
RPC.getProtocolName(method.getDeclaringClass());
}
/** The name of the method invoked. */
public String getMethodName() { return methodName; }
/** The parameter classes. */
public Class<?>[] getParameterClasses() { return parameterClasses; }
/** The parameter instances. */
public Object[] getParameters() { return parameters; }
private long getProtocolVersion() {
return clientVersion;
}
@SuppressWarnings("unused")
private int getClientMethodsHash() {
return clientMethodsHash;
}
/**
* Returns the rpc version used by the client.
* @return rpcVersion
*/
public long getRpcVersion() {
return rpcVersion;
}
@Override
@SuppressWarnings("deprecation")
public void readFields(DataInput in) throws IOException {
rpcVersion = in.readLong();
declaringClassProtocolName = UTF8.readString(in);
methodName = UTF8.readString(in);
clientVersion = in.readLong();
clientMethodsHash = in.readInt();
parameters = new Object[in.readInt()];
parameterClasses = new Class[parameters.length];
ObjectWritable objectWritable = new ObjectWritable();
for (int i = 0; i < parameters.length; i++) {
parameters[i] =
ObjectWritable.readObject(in, objectWritable, this.conf);
parameterClasses[i] = objectWritable.getDeclaredClass();
}
}
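    // On-the-wire layout shared by readFields (above) and write (below):
    //   long  rpcVersion
    //   UTF8  declaringClassProtocolName
    //   UTF8  methodName
    //   long  clientVersion
    //   int   clientMethodsHash
    //   int   number of parameters
    //   then one ObjectWritable per parameter (each carries its declared
    //   class, which readFields recovers into parameterClasses)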
@Override
@SuppressWarnings("deprecation")
public void write(DataOutput out) throws IOException {
out.writeLong(rpcVersion);
UTF8.writeString(out, declaringClassProtocolName);
UTF8.writeString(out, methodName);
out.writeLong(clientVersion);
out.writeInt(clientMethodsHash);
out.writeInt(parameterClasses.length);
for (int i = 0; i < parameterClasses.length; i++) {
ObjectWritable.writeObject(out, parameters[i], parameterClasses[i],
conf, true);
}
}
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
buffer.append(methodName);
buffer.append("(");
for (int i = 0; i < parameters.length; i++) {
if (i != 0)
buffer.append(", ");
buffer.append(parameters[i]);
}
buffer.append(")");
buffer.append(", rpc version="+rpcVersion);
buffer.append(", client version="+clientVersion);
buffer.append(", methodsFingerPrint="+clientMethodsHash);
return buffer.toString();
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return this.conf;
}
}
  private static ClientCache CLIENTS = new ClientCache();
private static class Invoker implements RpcInvocationHandler {
private Client.ConnectionId remoteId;
private Client client;
private boolean isClosed = false;
private final AtomicBoolean fallbackToSimpleAuth;
public Invoker(Class<?> protocol,
InetSocketAddress address, UserGroupInformation ticket,
Configuration conf, SocketFactory factory,
int rpcTimeout, AtomicBoolean fallbackToSimpleAuth)
throws IOException {
this.remoteId = Client.ConnectionId.getConnectionId(address, protocol,
ticket, rpcTimeout, conf);
this.client = CLIENTS.getClient(conf, factory);
this.fallbackToSimpleAuth = fallbackToSimpleAuth;
}
@Override
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable {
long startTime = 0;
if (LOG.isDebugEnabled()) {
startTime = Time.now();
}
TraceScope traceScope = null;
if (Trace.isTracing()) {
traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method));
}
ObjectWritable value;
try {
value = (ObjectWritable)
client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args),
remoteId, fallbackToSimpleAuth);
} finally {
if (traceScope != null) traceScope.close();
}
if (LOG.isDebugEnabled()) {
long callTime = Time.now() - startTime;
LOG.debug("Call: " + method.getName() + " " + callTime);
}
return value.get();
}
/* close the IPC client that's responsible for this invoker's RPCs */
@Override
synchronized public void close() {
if (!isClosed) {
isClosed = true;
CLIENTS.stopClient(client);
}
}
@Override
public ConnectionId getConnectionId() {
return remoteId;
}
}
// for unit testing only
@InterfaceAudience.Private
@InterfaceStability.Unstable
static Client getClient(Configuration conf) {
return CLIENTS.getClient(conf);
}
/** Construct a client-side proxy object that implements the named protocol,
* talking to a server at the named address.
* @param <T>*/
@Override
public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
InetSocketAddress addr, UserGroupInformation ticket,
Configuration conf, SocketFactory factory,
int rpcTimeout, RetryPolicy connectionRetryPolicy)
throws IOException {
return getProxy(protocol, clientVersion, addr, ticket, conf, factory,
rpcTimeout, connectionRetryPolicy, null);
}
/** Construct a client-side proxy object that implements the named protocol,
* talking to a server at the named address.
* @param <T>*/
@Override
@SuppressWarnings("unchecked")
public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
InetSocketAddress addr, UserGroupInformation ticket,
Configuration conf, SocketFactory factory,
int rpcTimeout, RetryPolicy connectionRetryPolicy,
AtomicBoolean fallbackToSimpleAuth)
throws IOException {
if (connectionRetryPolicy != null) {
throw new UnsupportedOperationException(
"Not supported: connectionRetryPolicy=" + connectionRetryPolicy);
}
T proxy = (T) Proxy.newProxyInstance(protocol.getClassLoader(),
new Class[] { protocol }, new Invoker(protocol, addr, ticket, conf,
factory, rpcTimeout, fallbackToSimpleAuth));
return new ProtocolProxy<T>(protocol, proxy, true);
}
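  // A minimal client-side sketch, assuming a hypothetical Writable-based
  // protocol interface MyProtocol that declares 'long versionID':
  //
  //   Configuration conf = new Configuration();
  //   RPC.setProtocolEngine(conf, MyProtocol.class, WritableRpcEngine.class);
  //   MyProtocol proxy = RPC.getProxy(MyProtocol.class, MyProtocol.versionID,
  //       addr, conf);
  //   proxy.doSomething();  // each call goes through Invoker.invoke above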
/* Construct a server for a protocol implementation instance listening on a
* port and address. */
@Override
public RPC.Server getServer(Class<?> protocolClass,
Object protocolImpl, String bindAddress, int port,
int numHandlers, int numReaders, int queueSizePerHandler,
boolean verbose, Configuration conf,
SecretManager<? extends TokenIdentifier> secretManager,
String portRangeConfig)
throws IOException {
return new Server(protocolClass, protocolImpl, conf, bindAddress, port,
numHandlers, numReaders, queueSizePerHandler, verbose, secretManager,
portRangeConfig);
}
/** An RPC Server. */
public static class Server extends RPC.Server {
/**
* Construct an RPC server.
* @param instance the instance whose methods will be called
* @param conf the configuration to use
* @param bindAddress the address to bind on to listen for connection
* @param port the port to listen for connections on
*
* @deprecated Use #Server(Class, Object, Configuration, String, int)
*/
@Deprecated
public Server(Object instance, Configuration conf, String bindAddress,
int port) throws IOException {
this(null, instance, conf, bindAddress, port);
}
/** Construct an RPC server.
* @param protocolClass class
* @param protocolImpl the instance whose methods will be called
* @param conf the configuration to use
* @param bindAddress the address to bind on to listen for connection
* @param port the port to listen for connections on
*/
public Server(Class<?> protocolClass, Object protocolImpl,
Configuration conf, String bindAddress, int port)
throws IOException {
this(protocolClass, protocolImpl, conf, bindAddress, port, 1, -1, -1,
false, null, null);
}
/**
* Construct an RPC server.
* @param protocolImpl the instance whose methods will be called
* @param conf the configuration to use
* @param bindAddress the address to bind on to listen for connection
* @param port the port to listen for connections on
* @param numHandlers the number of method handler threads to run
* @param verbose whether each call should be logged
*
* @deprecated use Server#Server(Class, Object,
* Configuration, String, int, int, int, int, boolean, SecretManager)
*/
@Deprecated
public Server(Object protocolImpl, Configuration conf, String bindAddress,
int port, int numHandlers, int numReaders, int queueSizePerHandler,
boolean verbose, SecretManager<? extends TokenIdentifier> secretManager)
throws IOException {
this(null, protocolImpl, conf, bindAddress, port,
numHandlers, numReaders, queueSizePerHandler, verbose,
secretManager, null);
}
/**
* Construct an RPC server.
* @param protocolClass - the protocol being registered
* can be null for compatibility with old usage (see below for details)
* @param protocolImpl the protocol impl that will be called
* @param conf the configuration to use
* @param bindAddress the address to bind on to listen for connection
* @param port the port to listen for connections on
* @param numHandlers the number of method handler threads to run
* @param verbose whether each call should be logged
*/
public Server(Class<?> protocolClass, Object protocolImpl,
Configuration conf, String bindAddress, int port,
int numHandlers, int numReaders, int queueSizePerHandler,
boolean verbose, SecretManager<? extends TokenIdentifier> secretManager,
String portRangeConfig)
throws IOException {
super(bindAddress, port, null, numHandlers, numReaders,
queueSizePerHandler, conf,
classNameBase(protocolImpl.getClass().getName()), secretManager,
portRangeConfig);
this.verbose = verbose;
Class<?>[] protocols;
if (protocolClass == null) { // derive protocol from impl
/*
       * In order to remain compatible with the old usage where a single
       * target protocolImpl is supplied for all protocol interfaces, and
       * the protocol classes are derived from the protocolImpl,
       * we register all interfaces extended by the protocolImpl.
*/
protocols = RPC.getProtocolInterfaces(protocolImpl.getClass());
} else {
if (!protocolClass.isAssignableFrom(protocolImpl.getClass())) {
throw new IOException("protocolClass "+ protocolClass +
" is not implemented by protocolImpl which is of class " +
protocolImpl.getClass());
}
// register protocol class and its super interfaces
registerProtocolAndImpl(RPC.RpcKind.RPC_WRITABLE, protocolClass, protocolImpl);
protocols = RPC.getProtocolInterfaces(protocolClass);
}
for (Class<?> p : protocols) {
if (!p.equals(VersionedProtocol.class)) {
registerProtocolAndImpl(RPC.RpcKind.RPC_WRITABLE, p, protocolImpl);
}
}
}
private static void log(String value) {
      if (value != null && value.length() > 55)
        value = value.substring(0, 55) + "...";
LOG.info(value);
}
static class WritableRpcInvoker implements RpcInvoker {
@Override
public Writable call(org.apache.hadoop.ipc.RPC.Server server,
String protocolName, Writable rpcRequest, long receivedTime)
throws IOException, RPC.VersionMismatch {
Invocation call = (Invocation)rpcRequest;
if (server.verbose) log("Call: " + call);
// Verify writable rpc version
if (call.getRpcVersion() != writableRpcVersion) {
// Client is using a different version of WritableRpc
throw new RpcServerException(
"WritableRpc version mismatch, client side version="
+ call.getRpcVersion() + ", server side version="
+ writableRpcVersion);
}
long clientVersion = call.getProtocolVersion();
final String protoName;
ProtoClassProtoImpl protocolImpl;
if (call.declaringClassProtocolName.equals(VersionedProtocol.class.getName())) {
        // VersionedProtocol methods are often used by the client to figure
        // out which version of the protocol to use.
        //
        // Versioned protocol methods should go to the protocolName protocol
        // rather than the declaring class of the method, since the
        // declaring class is VersionedProtocol, which is not
        // registered directly.
// Send the call to the highest protocol version
VerProtocolImpl highest = server.getHighestSupportedProtocol(
RPC.RpcKind.RPC_WRITABLE, protocolName);
if (highest == null) {
throw new RpcServerException("Unknown protocol: " + protocolName);
}
protocolImpl = highest.protocolTarget;
} else {
protoName = call.declaringClassProtocolName;
// Find the right impl for the protocol based on client version.
ProtoNameVer pv =
new ProtoNameVer(call.declaringClassProtocolName, clientVersion);
protocolImpl =
server.getProtocolImplMap(RPC.RpcKind.RPC_WRITABLE).get(pv);
if (protocolImpl == null) { // no match for Protocol AND Version
VerProtocolImpl highest =
server.getHighestSupportedProtocol(RPC.RpcKind.RPC_WRITABLE,
protoName);
if (highest == null) {
throw new RpcServerException("Unknown protocol: " + protoName);
} else { // protocol supported but not the version that client wants
throw new RPC.VersionMismatch(protoName, clientVersion,
highest.version);
}
}
}
// Invoke the protocol method
long startTime = Time.now();
int qTime = (int) (startTime-receivedTime);
Exception exception = null;
try {
Method method =
protocolImpl.protocolClass.getMethod(call.getMethodName(),
call.getParameterClasses());
method.setAccessible(true);
server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
Object value =
method.invoke(protocolImpl.protocolImpl, call.getParameters());
if (server.verbose) log("Return: "+value);
return new ObjectWritable(method.getReturnType(), value);
} catch (InvocationTargetException e) {
Throwable target = e.getTargetException();
if (target instanceof IOException) {
exception = (IOException)target;
throw (IOException)target;
} else {
IOException ioe = new IOException(target.toString());
ioe.setStackTrace(target.getStackTrace());
exception = ioe;
throw ioe;
}
} catch (Throwable e) {
if (!(e instanceof IOException)) {
LOG.error("Unexpected throwable object ", e);
}
IOException ioe = new IOException(e.toString());
ioe.setStackTrace(e.getStackTrace());
exception = ioe;
throw ioe;
} finally {
int processingTime = (int) (Time.now() - startTime);
if (LOG.isDebugEnabled()) {
          String msg = "Served: " + call.getMethodName() +
              " queueTime= " + qTime +
              " processingTime= " + processingTime;
if (exception != null) {
msg += " exception= " + exception.getClass().getSimpleName();
}
LOG.debug(msg);
}
String detailedMetricsName = (exception == null) ?
call.getMethodName() :
exception.getClass().getSimpleName();
server.rpcMetrics.addRpcQueueTime(qTime);
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName,
processingTime);
}
}
}
}
@Override
public ProtocolProxy<ProtocolMetaInfoPB> getProtocolMetaInfoProxy(
ConnectionId connId, Configuration conf, SocketFactory factory)
throws IOException {
throw new UnsupportedOperationException("This proxy is not supported");
}
}
| 21,213 | 36.480565 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcInvocationHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.Closeable;
import java.lang.reflect.InvocationHandler;
import org.apache.hadoop.ipc.Client.ConnectionId;
/**
* This interface must be implemented by all InvocationHandler
* implementations.
*/
public interface RpcInvocationHandler extends InvocationHandler, Closeable {
/**
* Returns the connection id associated with the InvocationHandler instance.
* @return ConnectionId
*/
ConnectionId getConnectionId();
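  // A bare-bones sketch of an implementation (illustrative only; real
  // handlers such as WritableRpcEngine's Invoker wrap an IPC Client, and
  // Method here is java.lang.reflect.Method):
  //
  //   class SketchHandler implements RpcInvocationHandler {
  //     private final ConnectionId remoteId;  // captured at construction
  //     SketchHandler(ConnectionId remoteId) { this.remoteId = remoteId; }
  //     @Override
  //     public Object invoke(Object proxy, Method method, Object[] args)
  //         throws Throwable {
  //       // serialize method + args, send them to remoteId, return the reply
  //       throw new UnsupportedOperationException("sketch only");
  //     }
  //     @Override
  //     public ConnectionId getConnectionId() { return remoteId; }
  //     @Override
  //     public void close() { /* release the underlying client */ }
  //   }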
}
| 1,285 | 33.756757 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.util.ArrayList;
import java.util.Collection;
import com.google.common.base.Joiner;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Used to register custom methods to refresh at runtime.
* Each identifier maps to one or more RefreshHandlers.
*/
@InterfaceStability.Unstable
public class RefreshRegistry {
public static final Log LOG = LogFactory.getLog(RefreshRegistry.class);
// Used to hold singleton instance
private static class RegistryHolder {
@SuppressWarnings("All")
public static RefreshRegistry registry = new RefreshRegistry();
}
// Singleton access
public static RefreshRegistry defaultRegistry() {
return RegistryHolder.registry;
}
private final Multimap<String, RefreshHandler> handlerTable;
public RefreshRegistry() {
handlerTable = HashMultimap.create();
}
/**
* Registers an object as a handler for a given identity.
   * Note: this will prevent the handler from being GC'd; the object should
   * unregister itself when done.
* @param identifier a unique identifier for this resource,
* such as org.apache.hadoop.blacklist
* @param handler the object to register
*/
public synchronized void register(String identifier, RefreshHandler handler) {
if (identifier == null) {
throw new NullPointerException("Identifier cannot be null");
}
handlerTable.put(identifier, handler);
}
/**
* Remove the registered object for a given identity.
* @param identifier the resource to unregister
   * @return true if the handler was removed
*/
public synchronized boolean unregister(String identifier, RefreshHandler handler) {
return handlerTable.remove(identifier, handler);
}
public synchronized void unregisterAll(String identifier) {
handlerTable.removeAll(identifier);
}
/**
   * Look up the responsible handlers and return their results.
* This should be called by the RPC server when it gets a refresh request.
* @param identifier the resource to refresh
* @param args the arguments to pass on, not including the program name
* @throws IllegalArgumentException on invalid identifier
* @return the response from the appropriate handler
*/
public synchronized Collection<RefreshResponse> dispatch(String identifier, String[] args) {
Collection<RefreshHandler> handlers = handlerTable.get(identifier);
if (handlers.size() == 0) {
String msg = "Identifier '" + identifier +
"' does not exist in RefreshRegistry. Valid options are: " +
Joiner.on(", ").join(handlerTable.keySet());
throw new IllegalArgumentException(msg);
}
ArrayList<RefreshResponse> responses =
new ArrayList<RefreshResponse>(handlers.size());
// Dispatch to each handler and store response
for(RefreshHandler handler : handlers) {
RefreshResponse response;
// Run the handler
try {
response = handler.handleRefresh(identifier, args);
if (response == null) {
throw new NullPointerException("Handler returned null.");
}
LOG.info(handlerName(handler) + " responds to '" + identifier +
"', says: '" + response.getMessage() + "', returns " +
response.getReturnCode());
} catch (Exception e) {
response = new RefreshResponse(-1, e.getLocalizedMessage());
}
response.setSenderName(handlerName(handler));
responses.add(response);
}
return responses;
}
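  // A minimal usage sketch (the identifier and handler are hypothetical;
  // RefreshHandler's handleRefresh(String, String[]) is the method invoked
  // by dispatch above):
  //
  //   RefreshHandler handler = new RefreshHandler() {
  //     public RefreshResponse handleRefresh(String identifier,
  //         String[] args) {
  //       return RefreshResponse.successResponse();
  //     }
  //   };
  //   RefreshRegistry.defaultRegistry()
  //       .register("org.example.blacklist", handler);
  //   ...
  //   Collection<RefreshResponse> responses = RefreshRegistry
  //       .defaultRegistry().dispatch("org.example.blacklist", new String[0]);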
private String handlerName(RefreshHandler h) {
return h.getClass().getName() + '@' + Integer.toHexString(h.hashCode());
}
}
| 4,607 | 33.133333 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.NoRouteToHostException;
import java.net.SocketTimeoutException;
import java.io.*;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.HashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.net.SocketFactory;
import org.apache.commons.logging.*;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Time;
import com.google.protobuf.BlockingService;
/** A simple RPC mechanism.
*
* A <i>protocol</i> is a Java interface. All parameters and return types must
* be one of:
*
* <ul> <li>a primitive type, <code>boolean</code>, <code>byte</code>,
* <code>char</code>, <code>short</code>, <code>int</code>, <code>long</code>,
* <code>float</code>, <code>double</code>, or <code>void</code>; or</li>
*
* <li>a {@link String}; or</li>
*
* <li>a {@link Writable}; or</li>
*
* <li>an array of the above types</li> </ul>
*
* All methods in the protocol should throw only IOException. No field data of
* the protocol instance is transmitted.
*/
public class RPC {
final static int RPC_SERVICE_CLASS_DEFAULT = 0;
public enum RpcKind {
RPC_BUILTIN ((short) 1), // Used for built in calls by tests
RPC_WRITABLE ((short) 2), // Use WritableRpcEngine
RPC_PROTOCOL_BUFFER ((short) 3); // Use ProtobufRpcEngine
final static short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size
public final short value; //TODO make it private
RpcKind(short val) {
this.value = val;
}
}
interface RpcInvoker {
/**
* Process a client call on the server side
* @param server the server within whose context this rpc call is made
     * @param protocol - the protocol name (the class of the client proxy
     *    used to make calls to the rpc server)
     * @param rpcRequest - the deserialized request
     * @param receiveTime time at which the call was received (for metrics)
* @return the call's return
* @throws IOException
**/
public Writable call(Server server, String protocol,
        Writable rpcRequest, long receiveTime) throws Exception;
}
static final Log LOG = LogFactory.getLog(RPC.class);
/**
* Get all superInterfaces that extend VersionedProtocol
* @param childInterfaces
* @return the super interfaces that extend VersionedProtocol
*/
static Class<?>[] getSuperInterfaces(Class<?>[] childInterfaces) {
List<Class<?>> allInterfaces = new ArrayList<Class<?>>();
for (Class<?> childInterface : childInterfaces) {
if (VersionedProtocol.class.isAssignableFrom(childInterface)) {
allInterfaces.add(childInterface);
allInterfaces.addAll(
Arrays.asList(
getSuperInterfaces(childInterface.getInterfaces())));
} else {
LOG.warn("Interface " + childInterface +
" ignored because it does not extend VersionedProtocol");
}
}
return allInterfaces.toArray(new Class[allInterfaces.size()]);
}
/**
* Get all interfaces that the given protocol implements or extends
* which are assignable from VersionedProtocol.
*/
static Class<?>[] getProtocolInterfaces(Class<?> protocol) {
Class<?>[] interfaces = protocol.getInterfaces();
return getSuperInterfaces(interfaces);
}
/**
* Get the protocol name.
   * If the protocol class has a ProtocolInfo annotation, then get the protocol
* name from the annotation; otherwise the class name is the protocol name.
*/
static public String getProtocolName(Class<?> protocol) {
if (protocol == null) {
return null;
}
ProtocolInfo anno = protocol.getAnnotation(ProtocolInfo.class);
return (anno == null) ? protocol.getName() : anno.protocolName();
}
/**
* Get the protocol version from protocol class.
   * If the protocol class has a ProtocolInfo annotation, then get the protocol
   * version from the annotation; otherwise the version is read reflectively
   * from the protocol's static versionID field.
*/
static public long getProtocolVersion(Class<?> protocol) {
if (protocol == null) {
throw new IllegalArgumentException("Null protocol");
}
long version;
ProtocolInfo anno = protocol.getAnnotation(ProtocolInfo.class);
if (anno != null) {
version = anno.protocolVersion();
if (version != -1)
return version;
}
try {
Field versionField = protocol.getField("versionID");
versionField.setAccessible(true);
return versionField.getLong(protocol);
} catch (NoSuchFieldException ex) {
throw new RuntimeException(ex);
} catch (IllegalAccessException ex) {
throw new RuntimeException(ex);
}
}
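  // The versionID convention read reflectively above, sketched on a
  // hypothetical protocol interface (used when no @ProtocolInfo annotation
  // supplies a version):
  //
  //   public interface MyProtocol extends VersionedProtocol {
  //     long versionID = 1L;
  //     void doSomething() throws IOException;
  //   }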
private RPC() {} // no public ctor
// cache of RpcEngines by protocol
private static final Map<Class<?>,RpcEngine> PROTOCOL_ENGINES
= new HashMap<Class<?>,RpcEngine>();
private static final String ENGINE_PROP = "rpc.engine";
/**
* Set a protocol to use a non-default RpcEngine.
* @param conf configuration to use
* @param protocol the protocol interface
* @param engine the RpcEngine impl
*/
public static void setProtocolEngine(Configuration conf,
Class<?> protocol, Class<?> engine) {
conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine, RpcEngine.class);
}
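  // For example (MyProtocol being a hypothetical protocol interface),
  //
  //   RPC.setProtocolEngine(conf, MyProtocol.class, ProtobufRpcEngine.class);
  //
  // stores ProtobufRpcEngine under the key
  // "rpc.engine.<fully-qualified MyProtocol name>", which getProtocolEngine
  // below reads back, defaulting to WritableRpcEngine.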
// return the RpcEngine configured to handle a protocol
static synchronized RpcEngine getProtocolEngine(Class<?> protocol,
Configuration conf) {
RpcEngine engine = PROTOCOL_ENGINES.get(protocol);
if (engine == null) {
Class<?> impl = conf.getClass(ENGINE_PROP+"."+protocol.getName(),
WritableRpcEngine.class);
engine = (RpcEngine)ReflectionUtils.newInstance(impl, conf);
PROTOCOL_ENGINES.put(protocol, engine);
}
return engine;
}
/**
* A version mismatch for the RPC protocol.
*/
public static class VersionMismatch extends RpcServerException {
private static final long serialVersionUID = 0;
private String interfaceName;
private long clientVersion;
private long serverVersion;
/**
* Create a version mismatch exception
* @param interfaceName the name of the protocol mismatch
* @param clientVersion the client's version of the protocol
* @param serverVersion the server's version of the protocol
*/
public VersionMismatch(String interfaceName, long clientVersion,
long serverVersion) {
super("Protocol " + interfaceName + " version mismatch. (client = " +
clientVersion + ", server = " + serverVersion + ")");
this.interfaceName = interfaceName;
this.clientVersion = clientVersion;
this.serverVersion = serverVersion;
}
/**
* Get the interface name
* @return the java class name
* (eg. org.apache.hadoop.mapred.InterTrackerProtocol)
*/
public String getInterfaceName() {
return interfaceName;
}
/**
* Get the client's preferred version
*/
public long getClientVersion() {
return clientVersion;
}
/**
     * Get the version of the protocol that the server agreed to.
*/
public long getServerVersion() {
return serverVersion;
}
/**
* get the rpc status corresponding to this exception
*/
public RpcStatusProto getRpcStatusProto() {
return RpcStatusProto.ERROR;
}
/**
* get the detailed rpc status corresponding to this exception
*/
public RpcErrorCodeProto getRpcErrorCodeProto() {
return RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH;
}
}
/**
* Get a proxy connection to a remote server
*
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @return the proxy
   * @throws IOException if the far end threw a RemoteException
*/
public static <T> T waitForProxy(
Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
Configuration conf
) throws IOException {
return waitForProtocolProxy(protocol, clientVersion, addr, conf).getProxy();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server
*
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @return the protocol proxy
   * @throws IOException if the far end threw a RemoteException
*/
public static <T> ProtocolProxy<T> waitForProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
Configuration conf) throws IOException {
return waitForProtocolProxy(
protocol, clientVersion, addr, conf, Long.MAX_VALUE);
}
/**
* Get a proxy connection to a remote server
*
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @param connTimeout time in milliseconds before giving up
* @return the proxy
   * @throws IOException if the far end threw a RemoteException
*/
public static <T> T waitForProxy(Class<T> protocol, long clientVersion,
InetSocketAddress addr, Configuration conf,
long connTimeout) throws IOException {
return waitForProtocolProxy(protocol, clientVersion, addr,
conf, connTimeout).getProxy();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server
*
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @param connTimeout time in milliseconds before giving up
* @return the protocol proxy
   * @throws IOException if the far end threw a RemoteException
*/
public static <T> ProtocolProxy<T> waitForProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf,
long connTimeout) throws IOException {
return waitForProtocolProxy(protocol, clientVersion, addr, conf, 0, null, connTimeout);
}
/**
* Get a proxy connection to a remote server
*
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @param rpcTimeout timeout for each RPC
* @param timeout time in milliseconds before giving up
* @return the proxy
   * @throws IOException if the far end threw a RemoteException
*/
public static <T> T waitForProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf,
int rpcTimeout,
long timeout) throws IOException {
return waitForProtocolProxy(protocol, clientVersion, addr,
conf, rpcTimeout, null, timeout).getProxy();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server
*
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @param rpcTimeout timeout for each RPC
* @param timeout time in milliseconds before giving up
* @return the proxy
   * @throws IOException if the far end threw a RemoteException
*/
public static <T> ProtocolProxy<T> waitForProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf,
int rpcTimeout,
RetryPolicy connectionRetryPolicy,
long timeout) throws IOException {
long startTime = Time.now();
IOException ioe;
while (true) {
try {
return getProtocolProxy(protocol, clientVersion, addr,
UserGroupInformation.getCurrentUser(), conf, NetUtils
.getDefaultSocketFactory(conf), rpcTimeout, connectionRetryPolicy);
} catch(ConnectException se) { // namenode has not been started
LOG.info("Server at " + addr + " not available yet, Zzzzz...");
ioe = se;
} catch(SocketTimeoutException te) { // namenode is busy
LOG.info("Problem connecting to server: " + addr);
ioe = te;
} catch(NoRouteToHostException nrthe) { // perhaps a VIP is failing over
LOG.info("No route to host for server: " + addr);
ioe = nrthe;
}
// check if timed out
if (Time.now()-timeout >= startTime) {
throw ioe;
}
if (Thread.currentThread().isInterrupted()) {
// interrupted during some IO; this may not have been caught
throw new InterruptedIOException("Interrupted waiting for the proxy");
}
// wait for retry
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw (IOException) new InterruptedIOException(
"Interrupted waiting for the proxy").initCause(ioe);
}
}
}
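  // A minimal usage sketch, assuming a hypothetical protocol MyProtocol:
  // block for up to one minute while the server starts, retrying the
  // connection once per second as implemented above.
  //
  //   MyProtocol proxy = RPC.waitForProxy(MyProtocol.class,
  //       MyProtocol.versionID, addr, conf, 60 * 1000L);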
/** Construct a client-side proxy object that implements the named protocol,
* talking to a server at the named address.
* @param <T>*/
public static <T> T getProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf,
SocketFactory factory) throws IOException {
return getProtocolProxy(
protocol, clientVersion, addr, conf, factory).getProxy();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server
*
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param conf configuration to use
* @param factory socket factory
* @return the protocol proxy
   * @throws IOException if the far end threw a RemoteException
*/
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf,
SocketFactory factory) throws IOException {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
return getProtocolProxy(protocol, clientVersion, addr, ugi, conf, factory);
}
/** Construct a client-side proxy object that implements the named protocol,
* talking to a server at the named address.
* @param <T>*/
public static <T> T getProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
UserGroupInformation ticket,
Configuration conf,
SocketFactory factory) throws IOException {
return getProtocolProxy(
protocol, clientVersion, addr, ticket, conf, factory).getProxy();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server
*
* @param protocol protocol class
* @param clientVersion client version
* @param addr remote address
* @param ticket user group information
* @param conf configuration to use
* @param factory socket factory
* @return the protocol proxy
   * @throws IOException if the far end threw a RemoteException
*/
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
UserGroupInformation ticket,
Configuration conf,
SocketFactory factory) throws IOException {
return getProtocolProxy(
protocol, clientVersion, addr, ticket, conf, factory, 0, null);
}
/**
* Construct a client-side proxy that implements the named protocol,
* talking to a server at the named address.
* @param <T>
*
* @param protocol protocol
* @param clientVersion client's version
* @param addr server address
* @param ticket security ticket
* @param conf configuration
* @param factory socket factory
* @param rpcTimeout max time for each rpc; 0 means no timeout
* @return the proxy
* @throws IOException if any error occurs
*/
public static <T> T getProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
UserGroupInformation ticket,
Configuration conf,
SocketFactory factory,
int rpcTimeout) throws IOException {
return getProtocolProxy(protocol, clientVersion, addr, ticket,
conf, factory, rpcTimeout, null).getProxy();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server
*
* @param protocol protocol
* @param clientVersion client's version
* @param addr server address
* @param ticket security ticket
* @param conf configuration
* @param factory socket factory
* @param rpcTimeout max time for each rpc; 0 means no timeout
* @param connectionRetryPolicy retry policy
* @return the proxy
* @throws IOException if any error occurs
*/
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
UserGroupInformation ticket,
Configuration conf,
SocketFactory factory,
int rpcTimeout,
RetryPolicy connectionRetryPolicy) throws IOException {
return getProtocolProxy(protocol, clientVersion, addr, ticket,
conf, factory, rpcTimeout, connectionRetryPolicy, null);
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server
*
* @param protocol protocol
* @param clientVersion client's version
* @param addr server address
* @param ticket security ticket
* @param conf configuration
* @param factory socket factory
* @param rpcTimeout max time for each rpc; 0 means no timeout
* @param connectionRetryPolicy retry policy
* @param fallbackToSimpleAuth set to true or false during calls to indicate if
* a secure client falls back to simple auth
* @return the proxy
* @throws IOException if any error occurs
*/
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr,
UserGroupInformation ticket,
Configuration conf,
SocketFactory factory,
int rpcTimeout,
RetryPolicy connectionRetryPolicy,
AtomicBoolean fallbackToSimpleAuth)
throws IOException {
if (UserGroupInformation.isSecurityEnabled()) {
SaslRpcServer.init(conf);
}
return getProtocolEngine(protocol, conf).getProxy(protocol, clientVersion,
addr, ticket, conf, factory, rpcTimeout, connectionRetryPolicy,
fallbackToSimpleAuth);
}
/**
* Construct a client-side proxy object with the default SocketFactory
* @param <T>
*
   * @param protocol protocol class
   * @param clientVersion client version
   * @param addr remote address
   * @param conf configuration to use
* @return a proxy instance
* @throws IOException
*/
public static <T> T getProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf)
throws IOException {
return getProtocolProxy(protocol, clientVersion, addr, conf).getProxy();
}
/**
* Returns the server address for a given proxy.
*/
public static InetSocketAddress getServerAddress(Object proxy) {
return getConnectionIdForProxy(proxy).getAddress();
}
/**
* Return the connection ID of the given object. If the provided object is in
* fact a protocol translator, we'll get the connection ID of the underlying
* proxy object.
*
* @param proxy the proxy object to get the connection ID of.
* @return the connection ID for the provided proxy object.
*/
public static ConnectionId getConnectionIdForProxy(Object proxy) {
if (proxy instanceof ProtocolTranslator) {
proxy = ((ProtocolTranslator)proxy).getUnderlyingProxyObject();
}
RpcInvocationHandler inv = (RpcInvocationHandler) Proxy
.getInvocationHandler(proxy);
return inv.getConnectionId();
}
/**
* Get a protocol proxy that contains a proxy connection to a remote server
* and a set of methods that are supported by the server
*
   * @param protocol protocol class
   * @param clientVersion client version
   * @param addr remote address
   * @param conf configuration to use
* @return a protocol proxy
* @throws IOException
*/
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf)
throws IOException {
return getProtocolProxy(protocol, clientVersion, addr, conf, NetUtils
.getDefaultSocketFactory(conf));
}
/**
* Stop the proxy. Proxy must either implement {@link Closeable} or must have
* associated {@link RpcInvocationHandler}.
*
* @param proxy
* the RPC proxy object to be stopped
* @throws HadoopIllegalArgumentException
   *           if the proxy does not implement the {@link Closeable} interface
   *           and does not have a closeable {@link InvocationHandler}
*/
public static void stopProxy(Object proxy) {
if (proxy == null) {
throw new HadoopIllegalArgumentException(
"Cannot close proxy since it is null");
}
try {
if (proxy instanceof Closeable) {
((Closeable) proxy).close();
return;
} else {
InvocationHandler handler = Proxy.getInvocationHandler(proxy);
if (handler instanceof Closeable) {
((Closeable) handler).close();
return;
}
}
} catch (IOException e) {
LOG.error("Closing proxy or invocation handler caused exception", e);
} catch (IllegalArgumentException e) {
LOG.error("RPC.stopProxy called on non proxy: class=" + proxy.getClass().getName(), e);
}
// If you see this error on a mock object in a unit test you're
// developing, make sure to use MockitoUtil.mockProtocol() to
// create your mock.
throw new HadoopIllegalArgumentException(
"Cannot close proxy - is not Closeable or "
+ "does not provide closeable invocation handler "
+ proxy.getClass());
}
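  // The standard lifecycle pattern for proxies created through this class
  // (MyProtocol is a hypothetical protocol interface):
  //
  //   MyProtocol proxy = RPC.getProxy(MyProtocol.class,
  //       MyProtocol.versionID, addr, conf);
  //   try {
  //     // ... make calls on proxy ...
  //   } finally {
  //     RPC.stopProxy(proxy);
  //   }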
/**
* Class to construct instances of RPC server with specific options.
*/
public static class Builder {
private Class<?> protocol = null;
private Object instance = null;
private String bindAddress = "0.0.0.0";
private int port = 0;
private int numHandlers = 1;
private int numReaders = -1;
private int queueSizePerHandler = -1;
private boolean verbose = false;
private final Configuration conf;
private SecretManager<? extends TokenIdentifier> secretManager = null;
private String portRangeConfig = null;
public Builder(Configuration conf) {
this.conf = conf;
}
/** Mandatory field */
public Builder setProtocol(Class<?> protocol) {
this.protocol = protocol;
return this;
}
/** Mandatory field */
public Builder setInstance(Object instance) {
this.instance = instance;
return this;
}
/** Default: 0.0.0.0 */
public Builder setBindAddress(String bindAddress) {
this.bindAddress = bindAddress;
return this;
}
/** Default: 0 */
public Builder setPort(int port) {
this.port = port;
return this;
}
/** Default: 1 */
public Builder setNumHandlers(int numHandlers) {
this.numHandlers = numHandlers;
return this;
}
/** Default: -1 */
public Builder setnumReaders(int numReaders) {
this.numReaders = numReaders;
return this;
}
/** Default: -1 */
public Builder setQueueSizePerHandler(int queueSizePerHandler) {
this.queueSizePerHandler = queueSizePerHandler;
return this;
}
/** Default: false */
public Builder setVerbose(boolean verbose) {
this.verbose = verbose;
return this;
}
/** Default: null */
public Builder setSecretManager(
SecretManager<? extends TokenIdentifier> secretManager) {
this.secretManager = secretManager;
return this;
}
/** Default: null */
public Builder setPortRangeConfig(String portRangeConfig) {
this.portRangeConfig = portRangeConfig;
return this;
}
/**
* Build the RPC Server.
* @throws IOException on error
* @throws HadoopIllegalArgumentException when mandatory fields are not set
*/
public Server build() throws IOException, HadoopIllegalArgumentException {
if (this.conf == null) {
throw new HadoopIllegalArgumentException("conf is not set");
}
if (this.protocol == null) {
throw new HadoopIllegalArgumentException("protocol is not set");
}
if (this.instance == null) {
throw new HadoopIllegalArgumentException("instance is not set");
}
return getProtocolEngine(this.protocol, this.conf).getServer(
this.protocol, this.instance, this.bindAddress, this.port,
this.numHandlers, this.numReaders, this.queueSizePerHandler,
this.verbose, this.conf, this.secretManager, this.portRangeConfig);
}
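    // A minimal usage sketch (the protocol class and 'service' instance are
    // hypothetical):
    //
    //   RPC.Server server = new RPC.Builder(conf)
    //       .setProtocol(MyProtocolPB.class)
    //       .setInstance(service)
    //       .setBindAddress("0.0.0.0")
    //       .setPort(0)            // 0 means pick any free port
    //       .setNumHandlers(5)
    //       .build();
    //   server.start();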
}
/** An RPC Server. */
public abstract static class Server extends org.apache.hadoop.ipc.Server {
boolean verbose;
static String classNameBase(String className) {
String[] names = className.split("\\.", -1);
if (names == null || names.length == 0) {
return className;
}
return names[names.length-1];
}
/**
* Store a map of protocol and version to its implementation
*/
/**
* The key in Map
*/
static class ProtoNameVer {
final String protocol;
final long version;
ProtoNameVer(String protocol, long ver) {
this.protocol = protocol;
this.version = ver;
}
@Override
public boolean equals(Object o) {
if (o == null)
return false;
if (this == o)
return true;
if (! (o instanceof ProtoNameVer))
return false;
ProtoNameVer pv = (ProtoNameVer) o;
return ((pv.protocol.equals(this.protocol)) &&
(pv.version == this.version));
}
@Override
public int hashCode() {
return protocol.hashCode() * 37 + (int) version;
}
}
/**
* The value in map
*/
static class ProtoClassProtoImpl {
final Class<?> protocolClass;
final Object protocolImpl;
ProtoClassProtoImpl(Class<?> protocolClass, Object protocolImpl) {
this.protocolClass = protocolClass;
this.protocolImpl = protocolImpl;
}
}
ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>> protocolImplMapArray =
new ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>>(RpcKind.MAX_INDEX);
Map<ProtoNameVer, ProtoClassProtoImpl> getProtocolImplMap(RPC.RpcKind rpcKind) {
if (protocolImplMapArray.size() == 0) {// initialize for all rpc kinds
for (int i=0; i <= RpcKind.MAX_INDEX; ++i) {
protocolImplMapArray.add(
new HashMap<ProtoNameVer, ProtoClassProtoImpl>(10));
}
}
return protocolImplMapArray.get(rpcKind.ordinal());
}
// Register protocol and its impl for rpc calls
void registerProtocolAndImpl(RpcKind rpcKind, Class<?> protocolClass,
Object protocolImpl) {
String protocolName = RPC.getProtocolName(protocolClass);
long version;
try {
version = RPC.getProtocolVersion(protocolClass);
} catch (Exception ex) {
        LOG.warn("Protocol " + protocolClass +
            " NOT registered because the protocol version could not be obtained");
return;
}
getProtocolImplMap(rpcKind).put(new ProtoNameVer(protocolName, version),
new ProtoClassProtoImpl(protocolClass, protocolImpl));
if (LOG.isDebugEnabled()) {
LOG.debug("RpcKind = " + rpcKind + " Protocol Name = " + protocolName +
" version=" + version +
" ProtocolImpl=" + protocolImpl.getClass().getName() +
" protocolClass=" + protocolClass.getName());
}
}
static class VerProtocolImpl {
final long version;
final ProtoClassProtoImpl protocolTarget;
VerProtocolImpl(long ver, ProtoClassProtoImpl protocolTarget) {
this.version = ver;
this.protocolTarget = protocolTarget;
}
}
VerProtocolImpl[] getSupportedProtocolVersions(RPC.RpcKind rpcKind,
String protocolName) {
VerProtocolImpl[] resultk =
new VerProtocolImpl[getProtocolImplMap(rpcKind).size()];
int i = 0;
for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv :
getProtocolImplMap(rpcKind).entrySet()) {
if (pv.getKey().protocol.equals(protocolName)) {
resultk[i++] =
new VerProtocolImpl(pv.getKey().version, pv.getValue());
}
}
if (i == 0) {
return null;
}
VerProtocolImpl[] result = new VerProtocolImpl[i];
System.arraycopy(resultk, 0, result, 0, i);
return result;
}
VerProtocolImpl getHighestSupportedProtocol(RpcKind rpcKind,
String protocolName) {
Long highestVersion = 0L;
ProtoClassProtoImpl highest = null;
if (LOG.isDebugEnabled()) {
LOG.debug("Size of protoMap for " + rpcKind + " ="
+ getProtocolImplMap(rpcKind).size());
}
for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv :
getProtocolImplMap(rpcKind).entrySet()) {
if (pv.getKey().protocol.equals(protocolName)) {
if ((highest == null) || (pv.getKey().version > highestVersion)) {
highest = pv.getValue();
highestVersion = pv.getKey().version;
}
}
}
if (highest == null) {
return null;
}
return new VerProtocolImpl(highestVersion, highest);
}
protected Server(String bindAddress, int port,
Class<? extends Writable> paramClass, int handlerCount,
int numReaders, int queueSizePerHandler,
Configuration conf, String serverName,
SecretManager<? extends TokenIdentifier> secretManager,
String portRangeConfig) throws IOException {
super(bindAddress, port, paramClass, handlerCount, numReaders, queueSizePerHandler,
conf, serverName, secretManager, portRangeConfig);
initProtocolMetaInfo(conf);
}
private void initProtocolMetaInfo(Configuration conf) {
RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class,
ProtobufRpcEngine.class);
ProtocolMetaInfoServerSideTranslatorPB xlator =
new ProtocolMetaInfoServerSideTranslatorPB(this);
BlockingService protocolInfoBlockingService = ProtocolInfoService
.newReflectiveBlockingService(xlator);
addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, ProtocolMetaInfoPB.class,
protocolInfoBlockingService);
}
/**
* Add a protocol to the existing server.
* @param protocolClass - the protocol class
* @param protocolImpl - the impl of the protocol that will be called
* @return the server (for convenience)
*/
public Server addProtocol(RpcKind rpcKind, Class<?> protocolClass,
Object protocolImpl) {
registerProtocolAndImpl(rpcKind, protocolClass, protocolImpl);
return this;
}
@Override
public Writable call(RPC.RpcKind rpcKind, String protocol,
Writable rpcRequest, long receiveTime) throws Exception {
return getRpcInvoker(rpcKind).call(this, protocol, rpcRequest,
receiveTime);
}
}
}
java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshResponse.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Return a response in the handler method for the user to see.
* Useful since you may want to display status to a user even though an
* error has not occurred.
*/
@InterfaceStability.Unstable
public class RefreshResponse {
private int returnCode = -1;
private String message;
private String senderName;
/**
* Convenience method to create a response for successful refreshes.
   * @return a successful RefreshResponse (return code 0, message "Success")
*/
public static RefreshResponse successResponse() {
return new RefreshResponse(0, "Success");
}
// Most RefreshHandlers will use this
public RefreshResponse(int returnCode, String message) {
this.returnCode = returnCode;
this.message = message;
}
/**
* Optionally set the sender of this RefreshResponse.
* This helps clarify things when multiple handlers respond.
* @param name The name of the sender
*/
public void setSenderName(String name) {
senderName = name;
}
public String getSenderName() { return senderName; }
public int getReturnCode() { return returnCode; }
public void setReturnCode(int rc) { returnCode = rc; }
public void setMessage(String m) { message = m; }
public String getMessage() { return message; }
@Override
public String toString() {
String ret = "";
if (senderName != null) {
ret += senderName + ": ";
}
if (message != null) {
ret += message;
}
ret += " (exit " + returnCode + ")";
return ret;
}
}
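// Minimal usage sketch (not part of the original file): a handler builds a
// response, optionally tags the sender, and toString() renders it as
// "sender: message (exit code)".
//
//   RefreshResponse resp = RefreshResponse.successResponse();
//   resp.setSenderName("NodeManager");
//   System.out.println(resp); // prints "NodeManager: Success (exit 0)"
//
//   RefreshResponse failed = new RefreshResponse(1, "Refresh failed");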
java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolInfo.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
/**
* The protocol name that is used when a client and server connect.
* By default the class name of the protocol interface is the protocol name.
*
* Why override the default name (i.e. the class name)?
 * One use case is when there are multiple implementations of the same
 * protocol, each with, say, a different version/serialization.
* In Hadoop this is used to allow multiple server and client adapters
* for different versions of the same protocol service.
*/
@Retention(RetentionPolicy.RUNTIME)
public @interface ProtocolInfo {
String protocolName(); // the name of the protocol (i.e. rpc service)
  long protocolVersion() default -1; // default means not defined; fall back to the old way
}
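// Illustrative sketch (hypothetical interface): overriding the default
// protocol name so two generated adapter interfaces can serve the same
// logical protocol under one name.
//
//   @ProtocolInfo(protocolName = "org.example.FooProtocol",
//       protocolVersion = 2)
//   public interface FooProtocolServerSideTranslatorPB {
//     // methods elided
//   }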
java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/IdentityProvider.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* The IdentityProvider creates identities for each schedulable
* by extracting fields and returning an identity string.
*
* Implementers will be able to change how schedulers treat
* Schedulables.
*/
@InterfaceAudience.Private
public interface IdentityProvider {
/**
* Return the string used for scheduling.
* @param obj the schedulable to use.
* @return string identity, or null if no identity could be made.
*/
public String makeIdentity(Schedulable obj);
}
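// Minimal sketch of an implementation (assumes the Schedulable exposes the
// caller's UserGroupInformation, as the Server.Call class in this package
// does):
//
//   public class UserIdentityProvider implements IdentityProvider {
//     @Override
//     public String makeIdentity(Schedulable obj) {
//       UserGroupInformation ugi = obj.getUserGroupInformation();
//       return (ugi == null) ? null : ugi.getShortUserName();
//     }
//   }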
java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientCache.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.util.HashMap;
import java.util.Map;
import javax.net.SocketFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.ObjectWritable;
import org.apache.hadoop.io.Writable;
/* Cache a client using its socket factory as the hash key */
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class ClientCache {
private Map<SocketFactory, Client> clients =
new HashMap<SocketFactory, Client>();
/**
* Construct & cache an IPC client with the user-provided SocketFactory
* if no cached client exists.
*
* @param conf Configuration
* @param factory SocketFactory for client socket
* @param valueClass Class of the expected response
* @return an IPC client
*/
public synchronized Client getClient(Configuration conf,
SocketFactory factory, Class<? extends Writable> valueClass) {
// Construct & cache client. The configuration is only used for timeout,
// and Clients have connection pools. So we can either (a) lose some
// connection pooling and leak sockets, or (b) use the same timeout for all
// configurations. Since the IPC is usually intended globally, not
    // per-job, we choose (b).
Client client = clients.get(factory);
if (client == null) {
client = new Client(valueClass, conf, factory);
clients.put(factory, client);
} else {
client.incCount();
}
if (Client.LOG.isDebugEnabled()) {
Client.LOG.debug("getting client out of cache: " + client);
}
return client;
}
/**
* Construct & cache an IPC client with the default SocketFactory
* and default valueClass if no cached client exists.
*
* @param conf Configuration
* @return an IPC client
*/
public synchronized Client getClient(Configuration conf) {
return getClient(conf, SocketFactory.getDefault(), ObjectWritable.class);
}
/**
* Construct & cache an IPC client with the user-provided SocketFactory
* if no cached client exists. Default response type is ObjectWritable.
*
* @param conf Configuration
* @param factory SocketFactory for client socket
* @return an IPC client
*/
public synchronized Client getClient(Configuration conf, SocketFactory factory) {
return this.getClient(conf, factory, ObjectWritable.class);
}
/**
   * Stop an RPC client connection.
   * An RPC client is closed only when its reference count becomes zero.
*/
public void stopClient(Client client) {
if (Client.LOG.isDebugEnabled()) {
Client.LOG.debug("stopping client from cache: " + client);
}
synchronized (this) {
client.decCount();
if (client.isZeroReference()) {
if (Client.LOG.isDebugEnabled()) {
Client.LOG.debug("removing client from cache: " + client);
}
clients.remove(client.getSocketFactory());
}
}
if (client.isZeroReference()) {
if (Client.LOG.isDebugEnabled()) {
Client.LOG.debug("stopping actual client because no more references remain: "
+ client);
}
client.stop();
}
}
}
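// Hedged usage sketch: callers pair each getClient() with a stopClient() so
// the reference count can reach zero and the underlying Client is stopped.
//
//   ClientCache cache = new ClientCache();
//   Client client = cache.getClient(conf); // default SocketFactory
//   try {
//     // ... issue calls through the client ...
//   } finally {
//     cache.stopClient(client); // decrements refcount; stops at zero
//   }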
java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInfoServerSideTranslatorPB.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import org.apache.hadoop.ipc.RPC.Server.VerProtocolImpl;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRequestProto;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureResponseProto;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolVersionsRequestProto;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolVersionsResponseProto;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolVersionProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
* This class serves the requests for protocol versions and signatures by
* looking them up in the server registry.
*/
public class ProtocolMetaInfoServerSideTranslatorPB implements
ProtocolMetaInfoPB {
RPC.Server server;
public ProtocolMetaInfoServerSideTranslatorPB(RPC.Server server) {
this.server = server;
}
@Override
public GetProtocolVersionsResponseProto getProtocolVersions(
RpcController controller, GetProtocolVersionsRequestProto request)
throws ServiceException {
String protocol = request.getProtocol();
GetProtocolVersionsResponseProto.Builder builder =
GetProtocolVersionsResponseProto.newBuilder();
for (RPC.RpcKind r : RPC.RpcKind.values()) {
long[] versions;
try {
versions = getProtocolVersionForRpcKind(r, protocol);
} catch (ClassNotFoundException e) {
throw new ServiceException(e);
}
ProtocolVersionProto.Builder b = ProtocolVersionProto.newBuilder();
if (versions != null) {
b.setRpcKind(r.toString());
for (long v : versions) {
b.addVersions(v);
}
}
builder.addProtocolVersions(b.build());
}
return builder.build();
}
@Override
public GetProtocolSignatureResponseProto getProtocolSignature(
RpcController controller, GetProtocolSignatureRequestProto request)
throws ServiceException {
GetProtocolSignatureResponseProto.Builder builder = GetProtocolSignatureResponseProto
.newBuilder();
String protocol = request.getProtocol();
String rpcKind = request.getRpcKind();
long[] versions;
try {
versions = getProtocolVersionForRpcKind(RPC.RpcKind.valueOf(rpcKind),
protocol);
} catch (ClassNotFoundException e1) {
throw new ServiceException(e1);
}
if (versions == null) {
return builder.build();
}
for (long v : versions) {
ProtocolSignatureProto.Builder sigBuilder = ProtocolSignatureProto
.newBuilder();
sigBuilder.setVersion(v);
try {
ProtocolSignature signature = ProtocolSignature.getProtocolSignature(
protocol, v);
for (int m : signature.getMethods()) {
sigBuilder.addMethods(m);
}
} catch (ClassNotFoundException e) {
throw new ServiceException(e);
}
builder.addProtocolSignature(sigBuilder.build());
}
return builder.build();
}
private long[] getProtocolVersionForRpcKind(RPC.RpcKind rpcKind,
String protocol) throws ClassNotFoundException {
Class<?> protocolClass = Class.forName(protocol);
String protocolName = RPC.getProtocolName(protocolClass);
VerProtocolImpl[] vers = server.getSupportedProtocolVersions(rpcKind,
protocolName);
if (vers == null) {
return null;
}
long [] versions = new long[vers.length];
for (int i=0; i<versions.length; i++) {
versions[i] = vers[i].version;
}
return versions;
}
}
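// Illustrative sketch (assumes the generated protobuf accessors implied by
// the imports above): querying the supported versions of a protocol through
// the translator.
//
//   GetProtocolVersionsRequestProto req = GetProtocolVersionsRequestProto
//       .newBuilder().setProtocol("org.example.FooProtocol").build();
//   GetProtocolVersionsResponseProto resp =
//       translator.getProtocolVersions(null, req);
//   for (ProtocolVersionProto pv : resp.getProtocolVersionsList()) {
//     System.out.println(pv.getRpcKind() + " -> " + pv.getVersionsList());
//   }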
java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UnexpectedServerException.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
/**
* Indicates that the RPC server encountered an undeclared exception from the
 * service.
*/
public class UnexpectedServerException extends RpcException {
private static final long serialVersionUID = 1L;
/**
* Constructs exception with the specified detail message.
*
   * @param message detailed message.
*/
UnexpectedServerException(final String message) {
super(message);
}
/**
* Constructs exception with the specified detail message and cause.
*
* @param message message.
   * @param cause the cause (which can be retrieved later by the
   *          {@link #getCause()} method). (A <tt>null</tt> value is
   *          permitted, and indicates that the cause is nonexistent or
   *          unknown.)
*/
UnexpectedServerException(final String message, final Throwable cause) {
super(message, cause);
}
}
java | hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.apache.hadoop.ipc.RpcConstants.AUTHORIZATION_FAILED_CALL_ID;
import static org.apache.hadoop.ipc.RpcConstants.CONNECTION_CONTEXT_CALL_ID;
import static org.apache.hadoop.ipc.RpcConstants.CURRENT_VERSION;
import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.net.BindException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.Channels;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.nio.channels.WritableByteChannel;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcResponseMessageWrapper;
import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcResponseWrapper;
import org.apache.hadoop.ipc.RPC.RpcInvoker;
import org.apache.hadoop.ipc.RPC.VersionMismatch;
import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
import org.apache.hadoop.ipc.metrics.RpcMetrics;
import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcKindProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcSaslProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcSaslProto.SaslAuth;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcSaslProto.SaslState;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SaslPropertiesResolver;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.ProtoUtil;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceInfo;
import org.apache.htrace.TraceScope;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ByteString;
import com.google.protobuf.CodedOutputStream;
import com.google.protobuf.Message;
import com.google.protobuf.Message.Builder;
/** An abstract IPC service. IPC calls take a single {@link Writable} as a
* parameter, and return a {@link Writable} as their value. A service runs on
* a port and is defined by a parameter class and a value class.
*
* @see Client
*/
public abstract class Server {
private final boolean authorize;
private List<AuthMethod> enabledAuthMethods;
private RpcSaslProto negotiateResponse;
private ExceptionsHandler exceptionsHandler = new ExceptionsHandler();
public void addTerseExceptions(Class<?>... exceptionClass) {
exceptionsHandler.addTerseExceptions(exceptionClass);
}
/**
* ExceptionsHandler manages Exception groups for special handling
* e.g., terse exception group for concise logging messages
*/
static class ExceptionsHandler {
private volatile Set<String> terseExceptions = new HashSet<String>();
/**
* Add exception class so server won't log its stack trace.
* Modifying the terseException through this method is thread safe.
*
* @param exceptionClass exception classes
*/
void addTerseExceptions(Class<?>... exceptionClass) {
// Make a copy of terseException for performing modification
final HashSet<String> newSet = new HashSet<String>(terseExceptions);
// Add all class names into the HashSet
for (Class<?> name : exceptionClass) {
newSet.add(name.toString());
}
// Replace terseException set
terseExceptions = Collections.unmodifiableSet(newSet);
}
boolean isTerse(Class<?> t) {
return terseExceptions.contains(t.toString());
}
}
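  // Hedged usage sketch: a service registers exceptions that are part of
  // normal control flow so the server logs them without a stack trace.
  // StandbyException and RetriableException (both in this package) are
  // typical candidates.
  //
  //   server.addTerseExceptions(StandbyException.class,
  //       RetriableException.class);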
/**
* If the user accidentally sends an HTTP GET to an IPC port, we detect this
* and send back a nicer response.
*/
private static final ByteBuffer HTTP_GET_BYTES = ByteBuffer.wrap(
"GET ".getBytes(Charsets.UTF_8));
/**
* An HTTP response to send back if we detect an HTTP request to our IPC
* port.
*/
static final String RECEIVED_HTTP_REQ_RESPONSE =
"HTTP/1.1 404 Not Found\r\n" +
"Content-type: text/plain\r\n\r\n" +
"It looks like you are making an HTTP request to a Hadoop IPC port. " +
"This is not the correct port for the web interface on this daemon.\r\n";
/**
* Initial and max size of response buffer
*/
static int INITIAL_RESP_BUF_SIZE = 10240;
static class RpcKindMapValue {
final Class<? extends Writable> rpcRequestWrapperClass;
final RpcInvoker rpcInvoker;
RpcKindMapValue (Class<? extends Writable> rpcRequestWrapperClass,
RpcInvoker rpcInvoker) {
this.rpcInvoker = rpcInvoker;
this.rpcRequestWrapperClass = rpcRequestWrapperClass;
}
}
static Map<RPC.RpcKind, RpcKindMapValue> rpcKindMap = new
HashMap<RPC.RpcKind, RpcKindMapValue>(4);
/**
* Register a RPC kind and the class to deserialize the rpc request.
*
* Called by static initializers of rpcKind Engines
* @param rpcKind
   * @param rpcRequestWrapperClass - this class is used to deserialize the
   *  rpc request.
   * @param rpcInvoker - used to process the calls on the server side.
*/
public static void registerProtocolEngine(RPC.RpcKind rpcKind,
Class<? extends Writable> rpcRequestWrapperClass,
RpcInvoker rpcInvoker) {
RpcKindMapValue old =
rpcKindMap.put(rpcKind, new RpcKindMapValue(rpcRequestWrapperClass, rpcInvoker));
if (old != null) {
rpcKindMap.put(rpcKind, old);
throw new IllegalArgumentException("ReRegistration of rpcKind: " +
rpcKind);
}
if (LOG.isDebugEnabled()) {
LOG.debug("rpcKind=" + rpcKind +
", rpcRequestWrapperClass=" + rpcRequestWrapperClass +
", rpcInvoker=" + rpcInvoker);
}
}
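  // Illustrative sketch (hypothetical engine classes): an RpcEngine's static
  // initializer registers its request wrapper and invoker exactly once; a
  // second registration for the same RpcKind throws IllegalArgumentException.
  //
  //   static {
  //     Server.registerProtocolEngine(RPC.RpcKind.RPC_WRITABLE,
  //         MyRequestWrapper.class, new MyRpcInvoker());
  //   }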
public Class<? extends Writable> getRpcRequestWrapper(
RpcKindProto rpcKind) {
if (rpcRequestClass != null)
return rpcRequestClass;
RpcKindMapValue val = rpcKindMap.get(ProtoUtil.convert(rpcKind));
return (val == null) ? null : val.rpcRequestWrapperClass;
}
public static RpcInvoker getRpcInvoker(RPC.RpcKind rpcKind) {
RpcKindMapValue val = rpcKindMap.get(rpcKind);
return (val == null) ? null : val.rpcInvoker;
}
public static final Log LOG = LogFactory.getLog(Server.class);
public static final Log AUDITLOG =
LogFactory.getLog("SecurityLogger."+Server.class.getName());
private static final String AUTH_FAILED_FOR = "Auth failed for ";
private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for ";
private static final ThreadLocal<Server> SERVER = new ThreadLocal<Server>();
private static final Map<String, Class<?>> PROTOCOL_CACHE =
new ConcurrentHashMap<String, Class<?>>();
static Class<?> getProtocolClass(String protocolName, Configuration conf)
throws ClassNotFoundException {
Class<?> protocol = PROTOCOL_CACHE.get(protocolName);
if (protocol == null) {
protocol = conf.getClassByName(protocolName);
PROTOCOL_CACHE.put(protocolName, protocol);
}
return protocol;
}
/** Returns the server instance called under or null. May be called under
* {@link #call(Writable, long)} implementations, and under {@link Writable}
   * methods of parameters and return values. Permits applications to access
* the server context.*/
public static Server get() {
return SERVER.get();
}
/** This is set to Call object before Handler invokes an RPC and reset
* after the call returns.
*/
private static final ThreadLocal<Call> CurCall = new ThreadLocal<Call>();
/** Get the current call */
@VisibleForTesting
public static ThreadLocal<Call> getCurCall() {
return CurCall;
}
/**
* Returns the currently active RPC call's sequential ID number. A negative
* call ID indicates an invalid value, such as if there is no currently active
* RPC call.
*
* @return int sequential ID number of currently active RPC call
*/
public static int getCallId() {
Call call = CurCall.get();
return call != null ? call.callId : RpcConstants.INVALID_CALL_ID;
}
/**
* @return The current active RPC call's retry count. -1 indicates the retry
* cache is not supported in the client side.
*/
public static int getCallRetryCount() {
Call call = CurCall.get();
return call != null ? call.retryCount : RpcConstants.INVALID_RETRY_COUNT;
}
  /** Returns the remote side's IP address when invoked inside an RPC.
   *  Returns null in case of an error.
*/
public static InetAddress getRemoteIp() {
Call call = CurCall.get();
return (call != null && call.connection != null) ? call.connection
.getHostInetAddress() : null;
}
/**
* Returns the clientId from the current RPC request
*/
public static byte[] getClientId() {
Call call = CurCall.get();
return call != null ? call.clientId : RpcConstants.DUMMY_CLIENT_ID;
}
/** Returns remote address as a string when invoked inside an RPC.
* Returns null in case of an error.
*/
public static String getRemoteAddress() {
InetAddress addr = getRemoteIp();
return (addr == null) ? null : addr.getHostAddress();
}
/** Returns the RPC remote user when invoked inside an RPC. Note this
   * may be different from the current user if called within another doAs.
* @return connection's UGI or null if not an RPC
*/
public static UserGroupInformation getRemoteUser() {
Call call = CurCall.get();
return (call != null && call.connection != null) ? call.connection.user
: null;
}
/** Return true if the invocation was through an RPC.
*/
public static boolean isRpcInvocation() {
return CurCall.get() != null;
}
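  // Minimal sketch: inside a handler, the thread-local accessors above expose
  // the caller's context without threading it through method signatures.
  //
  //   if (Server.isRpcInvocation()) {
  //     UserGroupInformation caller = Server.getRemoteUser();
  //     String from = Server.getRemoteAddress();
  //     LOG.info("call " + Server.getCallId() + " from " + caller
  //         + "@" + from);
  //   }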
private String bindAddress;
private int port; // port we listen on
private int handlerCount; // number of handler threads
private int readThreads; // number of read threads
private int readerPendingConnectionQueue; // number of connections to queue per read thread
private Class<? extends Writable> rpcRequestClass; // class used for deserializing the rpc request
final protected RpcMetrics rpcMetrics;
final protected RpcDetailedMetrics rpcDetailedMetrics;
private Configuration conf;
private String portRangeConfig = null;
private SecretManager<TokenIdentifier> secretManager;
private SaslPropertiesResolver saslPropsResolver;
private ServiceAuthorizationManager serviceAuthorizationManager = new ServiceAuthorizationManager();
private int maxQueueSize;
private final int maxRespSize;
private int socketSendBufferSize;
private final int maxDataLength;
private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
volatile private boolean running = true; // true while server runs
private CallQueueManager<Call> callQueue;
// maintains the set of client connections and handles idle timeouts
private ConnectionManager connectionManager;
private Listener listener = null;
private Responder responder = null;
private Handler[] handlers = null;
/**
* A convenience method to bind to a given address and report
* better exceptions if the address is not a valid host.
* @param socket the socket to bind
* @param address the address to bind to
* @param backlog the number of connections allowed in the queue
* @throws BindException if the address can't be bound
* @throws UnknownHostException if the address isn't a valid host name
* @throws IOException other random errors from bind
*/
public static void bind(ServerSocket socket, InetSocketAddress address,
int backlog) throws IOException {
bind(socket, address, backlog, null, null);
}
public static void bind(ServerSocket socket, InetSocketAddress address,
int backlog, Configuration conf, String rangeConf) throws IOException {
try {
IntegerRanges range = null;
if (rangeConf != null) {
range = conf.getRange(rangeConf, "");
}
if (range == null || range.isEmpty() || (address.getPort() != 0)) {
socket.bind(address, backlog);
} else {
for (Integer port : range) {
if (socket.isBound()) break;
try {
InetSocketAddress temp = new InetSocketAddress(address.getAddress(),
port);
socket.bind(temp, backlog);
} catch(BindException e) {
//Ignored
}
}
if (!socket.isBound()) {
throw new BindException("Could not find a free port in "+range);
}
}
} catch (SocketException e) {
throw NetUtils.wrapException(null,
0,
address.getHostName(),
address.getPort(), e);
}
}
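  // Hedged usage sketch (the range config key below is hypothetical): when
  // the requested port is 0 and the configured range is non-empty, bind()
  // probes the range for a free port instead of binding the wildcard port.
  //
  //   ServerSocket ss = new ServerSocket();
  //   Server.bind(ss, new InetSocketAddress("0.0.0.0", 0), 128,
  //       conf, "my.service.port.range");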
/**
* Returns a handle to the rpcMetrics (required in tests)
* @return rpc metrics
*/
@VisibleForTesting
public RpcMetrics getRpcMetrics() {
return rpcMetrics;
}
@VisibleForTesting
public RpcDetailedMetrics getRpcDetailedMetrics() {
return rpcDetailedMetrics;
}
@VisibleForTesting
Iterable<? extends Thread> getHandlers() {
return Arrays.asList(handlers);
}
@VisibleForTesting
Connection[] getConnections() {
return connectionManager.toArray();
}
/**
* Refresh the service authorization ACL for the service handled by this server.
*/
public void refreshServiceAcl(Configuration conf, PolicyProvider provider) {
serviceAuthorizationManager.refresh(conf, provider);
}
/**
* Refresh the service authorization ACL for the service handled by this server
* using the specified Configuration.
*/
@Private
public void refreshServiceAclWithLoadedConfiguration(Configuration conf,
PolicyProvider provider) {
serviceAuthorizationManager.refreshWithLoadedConfiguration(conf, provider);
}
/**
* Returns a handle to the serviceAuthorizationManager (required in tests)
* @return instance of ServiceAuthorizationManager for this server
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
public ServiceAuthorizationManager getServiceAuthorizationManager() {
return serviceAuthorizationManager;
}
static Class<? extends BlockingQueue<Call>> getQueueClass(
String prefix, Configuration conf) {
String name = prefix + "." + CommonConfigurationKeys.IPC_CALLQUEUE_IMPL_KEY;
Class<?> queueClass = conf.getClass(name, LinkedBlockingQueue.class);
return CallQueueManager.convertQueueClass(queueClass, Call.class);
}
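  // Illustrative sketch: selecting a custom call-queue implementation for one
  // port via configuration. The literal key below assumes the default
  // IPC_CALLQUEUE_NAMESPACE ("ipc") and IPC_CALLQUEUE_IMPL_KEY
  // ("callqueue.impl") strings and port 8020.
  //
  //   conf.setClass("ipc.8020.callqueue.impl",
  //       FairCallQueue.class, BlockingQueue.class);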
private String getQueueClassPrefix() {
return CommonConfigurationKeys.IPC_CALLQUEUE_NAMESPACE + "." + port;
}
/*
* Refresh the call queue
*/
public synchronized void refreshCallQueue(Configuration conf) {
// Create the next queue
String prefix = getQueueClassPrefix();
callQueue.swapQueue(getQueueClass(prefix, conf), maxQueueSize, prefix, conf);
}
/**
* Get from config if client backoff is enabled on that port.
*/
static boolean getClientBackoffEnable(
String prefix, Configuration conf) {
String name = prefix + "." +
CommonConfigurationKeys.IPC_BACKOFF_ENABLE;
return conf.getBoolean(name,
CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT);
}
/** A call queued for handling. */
public static class Call implements Schedulable {
private final int callId; // the client's call id
private final int retryCount; // the retry count of the call
private final Writable rpcRequest; // Serialized Rpc request from client
private final Connection connection; // connection to client
private long timestamp; // time received when response is null
// time served when response is not null
private ByteBuffer rpcResponse; // the response for this call
private final RPC.RpcKind rpcKind;
private final byte[] clientId;
private final Span traceSpan; // the tracing span on the server side
public Call(int id, int retryCount, Writable param,
Connection connection) {
this(id, retryCount, param, connection, RPC.RpcKind.RPC_BUILTIN,
RpcConstants.DUMMY_CLIENT_ID);
}
public Call(int id, int retryCount, Writable param, Connection connection,
RPC.RpcKind kind, byte[] clientId) {
this(id, retryCount, param, connection, kind, clientId, null);
}
public Call(int id, int retryCount, Writable param, Connection connection,
RPC.RpcKind kind, byte[] clientId, Span span) {
this.callId = id;
this.retryCount = retryCount;
this.rpcRequest = param;
this.connection = connection;
this.timestamp = Time.now();
this.rpcResponse = null;
this.rpcKind = kind;
this.clientId = clientId;
this.traceSpan = span;
}
@Override
public String toString() {
return rpcRequest + " from " + connection + " Call#" + callId + " Retry#"
+ retryCount;
}
public void setResponse(ByteBuffer response) {
this.rpcResponse = response;
}
// For Schedulable
@Override
public UserGroupInformation getUserGroupInformation() {
return connection.user;
}
}
/** Listens on the socket. Creates jobs for the handler threads*/
private class Listener extends Thread {
private ServerSocketChannel acceptChannel = null; //the accept channel
private Selector selector = null; //the selector that we use for the server
private Reader[] readers = null;
private int currentReader = 0;
private InetSocketAddress address; //the address we bind at
private int backlogLength = conf.getInt(
CommonConfigurationKeysPublic.IPC_SERVER_LISTEN_QUEUE_SIZE_KEY,
CommonConfigurationKeysPublic.IPC_SERVER_LISTEN_QUEUE_SIZE_DEFAULT);
public Listener() throws IOException {
address = new InetSocketAddress(bindAddress, port);
// Create a new server socket and set to non blocking mode
acceptChannel = ServerSocketChannel.open();
acceptChannel.configureBlocking(false);
// Bind the server socket to the local host and port
bind(acceptChannel.socket(), address, backlogLength, conf, portRangeConfig);
port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port
// create a selector;
selector= Selector.open();
readers = new Reader[readThreads];
for (int i = 0; i < readThreads; i++) {
Reader reader = new Reader(
"Socket Reader #" + (i + 1) + " for port " + port);
readers[i] = reader;
reader.start();
}
// Register accepts on the server socket with the selector.
acceptChannel.register(selector, SelectionKey.OP_ACCEPT);
this.setName("IPC Server listener on " + port);
this.setDaemon(true);
}
private class Reader extends Thread {
final private BlockingQueue<Connection> pendingConnections;
private final Selector readSelector;
Reader(String name) throws IOException {
super(name);
this.pendingConnections =
new LinkedBlockingQueue<Connection>(readerPendingConnectionQueue);
this.readSelector = Selector.open();
}
@Override
public void run() {
LOG.info("Starting " + Thread.currentThread().getName());
try {
doRunLoop();
} finally {
try {
readSelector.close();
} catch (IOException ioe) {
LOG.error("Error closing read selector in " + Thread.currentThread().getName(), ioe);
}
}
}
private synchronized void doRunLoop() {
while (running) {
SelectionKey key = null;
try {
// consume as many connections as currently queued to avoid
// unbridled acceptance of connections that starves the select
int size = pendingConnections.size();
for (int i=size; i>0; i--) {
Connection conn = pendingConnections.take();
conn.channel.register(readSelector, SelectionKey.OP_READ, conn);
}
readSelector.select();
Iterator<SelectionKey> iter = readSelector.selectedKeys().iterator();
while (iter.hasNext()) {
key = iter.next();
iter.remove();
if (key.isValid()) {
if (key.isReadable()) {
doRead(key);
}
}
key = null;
}
} catch (InterruptedException e) {
if (running) { // unexpected -- log it
LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
}
} catch (IOException ex) {
LOG.error("Error in Reader", ex);
}
}
}
/**
* Updating the readSelector while it's being used is not thread-safe,
* so the connection must be queued. The reader will drain the queue
* and update its readSelector before performing the next select
*/
public void addConnection(Connection conn) throws InterruptedException {
pendingConnections.put(conn);
readSelector.wakeup();
}
void shutdown() {
assert !running;
readSelector.wakeup();
try {
super.interrupt();
super.join();
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
}
@Override
public void run() {
LOG.info(Thread.currentThread().getName() + ": starting");
SERVER.set(Server.this);
connectionManager.startIdleScan();
while (running) {
SelectionKey key = null;
try {
getSelector().select();
Iterator<SelectionKey> iter = getSelector().selectedKeys().iterator();
while (iter.hasNext()) {
key = iter.next();
iter.remove();
try {
if (key.isValid()) {
if (key.isAcceptable())
doAccept(key);
}
} catch (IOException e) {
}
key = null;
}
} catch (OutOfMemoryError e) {
// we can run out of memory if we have too many threads
// log the event and sleep for a minute and give
// some thread(s) a chance to finish
LOG.warn("Out of Memory in server select", e);
closeCurrentConnection(key, e);
connectionManager.closeIdle(true);
try { Thread.sleep(60000); } catch (Exception ie) {}
} catch (Exception e) {
closeCurrentConnection(key, e);
}
}
LOG.info("Stopping " + Thread.currentThread().getName());
synchronized (this) {
try {
acceptChannel.close();
selector.close();
} catch (IOException e) { }
selector= null;
acceptChannel= null;
// close all connections
connectionManager.stopIdleScan();
connectionManager.closeAll();
}
}
private void closeCurrentConnection(SelectionKey key, Throwable e) {
if (key != null) {
Connection c = (Connection)key.attachment();
if (c != null) {
closeConnection(c);
c = null;
}
}
}
InetSocketAddress getAddress() {
return (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
}
void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOfMemoryError {
ServerSocketChannel server = (ServerSocketChannel) key.channel();
SocketChannel channel;
while ((channel = server.accept()) != null) {
channel.configureBlocking(false);
channel.socket().setTcpNoDelay(tcpNoDelay);
channel.socket().setKeepAlive(true);
Reader reader = getReader();
Connection c = connectionManager.register(channel);
// If the connectionManager can't take it, close the connection.
if (c == null) {
if (channel.isOpen()) {
IOUtils.cleanup(null, channel);
}
continue;
}
key.attach(c); // so closeCurrentConnection can get the object
reader.addConnection(c);
}
}
void doRead(SelectionKey key) throws InterruptedException {
int count = 0;
Connection c = (Connection)key.attachment();
if (c == null) {
return;
}
c.setLastContact(Time.now());
try {
count = c.readAndProcess();
} catch (InterruptedException ieo) {
LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo);
throw ieo;
} catch (Exception e) {
// a WrappedRpcServerException is an exception that has been sent
// to the client, so the stacktrace is unnecessary; any other
// exceptions are unexpected internal server errors and thus the
// stacktrace should be logged
LOG.info(Thread.currentThread().getName() + ": readAndProcess from client " +
c.getHostAddress() + " threw exception [" + e + "]",
(e instanceof WrappedRpcServerException) ? null : e);
count = -1; //so that the (count < 0) block is executed
}
if (count < 0) {
closeConnection(c);
c = null;
}
else {
c.setLastContact(Time.now());
}
}
synchronized void doStop() {
if (selector != null) {
selector.wakeup();
Thread.yield();
}
if (acceptChannel != null) {
try {
acceptChannel.socket().close();
} catch (IOException e) {
LOG.info(Thread.currentThread().getName() + ":Exception in closing listener socket. " + e);
}
}
for (Reader r : readers) {
r.shutdown();
}
}
synchronized Selector getSelector() { return selector; }
// The method that will return the next reader to work with
// Simplistic implementation of round robin for now
Reader getReader() {
currentReader = (currentReader + 1) % readers.length;
return readers[currentReader];
}
}
// Sends responses of RPC back to clients.
private class Responder extends Thread {
private final Selector writeSelector;
private int pending; // connections waiting to register
final static int PURGE_INTERVAL = 900000; // 15mins
Responder() throws IOException {
this.setName("IPC Server Responder");
this.setDaemon(true);
writeSelector = Selector.open(); // create a selector
pending = 0;
}
@Override
public void run() {
LOG.info(Thread.currentThread().getName() + ": starting");
SERVER.set(Server.this);
try {
doRunLoop();
} finally {
LOG.info("Stopping " + Thread.currentThread().getName());
try {
writeSelector.close();
} catch (IOException ioe) {
LOG.error("Couldn't close write selector in " + Thread.currentThread().getName(), ioe);
}
}
}
private void doRunLoop() {
long lastPurgeTime = 0; // last check for old calls.
while (running) {
try {
waitPending(); // If a channel is being registered, wait.
writeSelector.select(PURGE_INTERVAL);
Iterator<SelectionKey> iter = writeSelector.selectedKeys().iterator();
while (iter.hasNext()) {
SelectionKey key = iter.next();
iter.remove();
try {
if (key.isValid() && key.isWritable()) {
doAsyncWrite(key);
}
} catch (IOException e) {
LOG.info(Thread.currentThread().getName() + ": doAsyncWrite threw exception " + e);
}
}
long now = Time.now();
if (now < lastPurgeTime + PURGE_INTERVAL) {
continue;
}
lastPurgeTime = now;
//
// If there were some calls that have not been sent out for a
// long time, discard them.
//
if(LOG.isDebugEnabled()) {
LOG.debug("Checking for old call responses.");
}
ArrayList<Call> calls;
// get the list of channels from list of keys.
synchronized (writeSelector.keys()) {
calls = new ArrayList<Call>(writeSelector.keys().size());
iter = writeSelector.keys().iterator();
while (iter.hasNext()) {
SelectionKey key = iter.next();
Call call = (Call)key.attachment();
if (call != null && key.channel() == call.connection.channel) {
calls.add(call);
}
}
}
for(Call call : calls) {
doPurge(call, now);
}
} catch (OutOfMemoryError e) {
//
// we can run out of memory if we have too many threads
// log the event and sleep for a minute and give
// some thread(s) a chance to finish
//
LOG.warn("Out of Memory in server select", e);
try { Thread.sleep(60000); } catch (Exception ie) {}
} catch (Exception e) {
LOG.warn("Exception in Responder", e);
}
}
}
private void doAsyncWrite(SelectionKey key) throws IOException {
Call call = (Call)key.attachment();
if (call == null) {
return;
}
if (key.channel() != call.connection.channel) {
throw new IOException("doAsyncWrite: bad channel");
}
synchronized(call.connection.responseQueue) {
if (processResponse(call.connection.responseQueue, false)) {
try {
key.interestOps(0);
} catch (CancelledKeyException e) {
/* The Listener/reader might have closed the socket.
* We don't explicitly cancel the key, so not sure if this will
* ever fire.
* This warning could be removed.
*/
LOG.warn("Exception while changing ops : " + e);
}
}
}
}
//
// Remove calls that have been pending in the responseQueue
// for a long time.
//
private void doPurge(Call call, long now) {
LinkedList<Call> responseQueue = call.connection.responseQueue;
synchronized (responseQueue) {
Iterator<Call> iter = responseQueue.listIterator(0);
while (iter.hasNext()) {
call = iter.next();
if (now > call.timestamp + PURGE_INTERVAL) {
closeConnection(call.connection);
break;
}
}
}
}
// Processes one response. Returns true if there are no more pending
// data for this channel.
//
private boolean processResponse(LinkedList<Call> responseQueue,
boolean inHandler) throws IOException {
boolean error = true;
boolean done = false; // there is more data for this channel.
int numElements = 0;
Call call = null;
try {
synchronized (responseQueue) {
//
// If there are no items for this channel, then we are done
//
numElements = responseQueue.size();
if (numElements == 0) {
error = false;
return true; // no more data for this channel.
}
//
// Extract the first call
//
call = responseQueue.removeFirst();
SocketChannel channel = call.connection.channel;
if (LOG.isDebugEnabled()) {
LOG.debug(Thread.currentThread().getName() + ": responding to " + call);
}
//
// Send as much data as we can in the non-blocking fashion
//
int numBytes = channelWrite(channel, call.rpcResponse);
if (numBytes < 0) {
return true;
}
if (!call.rpcResponse.hasRemaining()) {
//Clear out the response buffer so it can be collected
call.rpcResponse = null;
call.connection.decRpcCount();
          if (numElements == 1) { // last call fully processed.
done = true; // no more data for this channel.
} else {
done = false; // more calls pending to be sent.
}
if (LOG.isDebugEnabled()) {
LOG.debug(Thread.currentThread().getName() + ": responding to " + call
+ " Wrote " + numBytes + " bytes.");
}
} else {
//
// If we were unable to write the entire response out, then
// insert in Selector queue.
//
call.connection.responseQueue.addFirst(call);
if (inHandler) {
// set the serve time when the response has to be sent later
call.timestamp = Time.now();
incPending();
try {
// Wakeup the thread blocked on select, only then can the call
// to channel.register() complete.
writeSelector.wakeup();
channel.register(writeSelector, SelectionKey.OP_WRITE, call);
} catch (ClosedChannelException e) {
                //It's OK: the channel might be closed elsewhere.
done = true;
} finally {
decPending();
}
}
if (LOG.isDebugEnabled()) {
LOG.debug(Thread.currentThread().getName() + ": responding to " + call
+ " Wrote partial " + numBytes + " bytes.");
}
}
error = false; // everything went off well
}
} finally {
if (error && call != null) {
LOG.warn(Thread.currentThread().getName()+", call " + call + ": output error");
done = true; // error. no more data for this channel.
closeConnection(call.connection);
}
}
return done;
}
//
// Enqueue a response from the application.
//
void doRespond(Call call) throws IOException {
synchronized (call.connection.responseQueue) {
call.connection.responseQueue.addLast(call);
if (call.connection.responseQueue.size() == 1) {
processResponse(call.connection.responseQueue, true);
}
}
}
private synchronized void incPending() { // call waiting to be enqueued.
pending++;
}
private synchronized void decPending() { // call done enqueueing.
pending--;
notify();
}
private synchronized void waitPending() throws InterruptedException {
while (pending > 0) {
wait();
}
}
}
@InterfaceAudience.Private
public static enum AuthProtocol {
NONE(0),
SASL(-33);
public final int callId;
AuthProtocol(int callId) {
this.callId = callId;
}
static AuthProtocol valueOf(int callId) {
for (AuthProtocol authType : AuthProtocol.values()) {
if (authType.callId == callId) {
return authType;
}
}
return null;
}
};
/**
* Wrapper for RPC IOExceptions to be returned to the client. Used to
* let exceptions bubble up to top of processOneRpc where the correct
* callId can be associated with the response. Also used to prevent
* unnecessary stack trace logging if it's not an internal server error.
*/
@SuppressWarnings("serial")
private static class WrappedRpcServerException extends RpcServerException {
private final RpcErrorCodeProto errCode;
public WrappedRpcServerException(RpcErrorCodeProto errCode, IOException ioe) {
super(ioe.toString(), ioe);
this.errCode = errCode;
}
public WrappedRpcServerException(RpcErrorCodeProto errCode, String message) {
this(errCode, new RpcServerException(message));
}
@Override
public RpcErrorCodeProto getRpcErrorCodeProto() {
return errCode;
}
@Override
public String toString() {
return getCause().toString();
}
}
/** Reads calls from a connection and queues them for handling. */
public class Connection {
private boolean connectionHeaderRead = false; // connection header is read?
private boolean connectionContextRead = false; //if connection context that
//follows connection header is read
private SocketChannel channel;
private ByteBuffer data;
private ByteBuffer dataLengthBuffer;
private LinkedList<Call> responseQueue;
// number of outstanding rpcs
private AtomicInteger rpcCount = new AtomicInteger();
private long lastContact;
private int dataLength;
private Socket socket;
// Cache the remote host & port info so that even if the socket is
// disconnected, we can say where it used to connect to.
private String hostAddress;
private int remotePort;
private InetAddress addr;
IpcConnectionContextProto connectionContext;
String protocolName;
SaslServer saslServer;
private AuthMethod authMethod;
private AuthProtocol authProtocol;
private boolean saslContextEstablished;
private ByteBuffer connectionHeaderBuf = null;
private ByteBuffer unwrappedData;
private ByteBuffer unwrappedDataLengthBuffer;
private int serviceClass;
UserGroupInformation user = null;
public UserGroupInformation attemptingUser = null; // user name before auth
// Fake 'call' for failed authorization response
private final Call authFailedCall = new Call(AUTHORIZATION_FAILED_CALL_ID,
RpcConstants.INVALID_RETRY_COUNT, null, this);
private ByteArrayOutputStream authFailedResponse = new ByteArrayOutputStream();
private final Call saslCall = new Call(AuthProtocol.SASL.callId,
RpcConstants.INVALID_RETRY_COUNT, null, this);
private final ByteArrayOutputStream saslResponse = new ByteArrayOutputStream();
private boolean sentNegotiate = false;
private boolean useWrap = false;
public Connection(SocketChannel channel, long lastContact) {
this.channel = channel;
this.lastContact = lastContact;
this.data = null;
this.dataLengthBuffer = ByteBuffer.allocate(4);
this.unwrappedData = null;
this.unwrappedDataLengthBuffer = ByteBuffer.allocate(4);
this.socket = channel.socket();
this.addr = socket.getInetAddress();
if (addr == null) {
this.hostAddress = "*Unknown*";
} else {
this.hostAddress = addr.getHostAddress();
}
this.remotePort = socket.getPort();
this.responseQueue = new LinkedList<Call>();
if (socketSendBufferSize != 0) {
try {
socket.setSendBufferSize(socketSendBufferSize);
} catch (IOException e) {
LOG.warn("Connection: unable to set socket send buffer size to " +
socketSendBufferSize);
}
}
}
@Override
public String toString() {
return getHostAddress() + ":" + remotePort;
}
public String getHostAddress() {
return hostAddress;
}
public InetAddress getHostInetAddress() {
return addr;
}
public void setLastContact(long lastContact) {
this.lastContact = lastContact;
}
public long getLastContact() {
return lastContact;
}
/* Return true if the connection has no outstanding rpc */
private boolean isIdle() {
return rpcCount.get() == 0;
}
/* Decrement the outstanding RPC count */
private void decRpcCount() {
rpcCount.decrementAndGet();
}
/* Increment the outstanding RPC count */
private void incRpcCount() {
rpcCount.incrementAndGet();
}
private UserGroupInformation getAuthorizedUgi(String authorizedId)
throws InvalidToken, AccessControlException {
if (authMethod == AuthMethod.TOKEN) {
TokenIdentifier tokenId = SaslRpcServer.getIdentifier(authorizedId,
secretManager);
UserGroupInformation ugi = tokenId.getUser();
if (ugi == null) {
throw new AccessControlException(
"Can't retrieve username from tokenIdentifier.");
}
ugi.addTokenIdentifier(tokenId);
return ugi;
} else {
return UserGroupInformation.createRemoteUser(authorizedId, authMethod);
}
}
private void saslReadAndProcess(DataInputStream dis) throws
WrappedRpcServerException, IOException, InterruptedException {
final RpcSaslProto saslMessage =
decodeProtobufFromStream(RpcSaslProto.newBuilder(), dis);
switch (saslMessage.getState()) {
case WRAP: {
if (!saslContextEstablished || !useWrap) {
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
new SaslException("Server is not wrapping data"));
}
// loops over decoded data and calls processOneRpc
unwrapPacketAndProcessRpcs(saslMessage.getToken().toByteArray());
break;
}
default:
saslProcess(saslMessage);
}
}
private Throwable getCauseForInvalidToken(IOException e) {
Throwable cause = e;
while (cause != null) {
if (cause instanceof RetriableException) {
return cause;
} else if (cause instanceof StandbyException) {
return cause;
} else if (cause instanceof InvalidToken) {
// FIXME: hadoop method signatures are restricting the SASL
// callbacks to only returning InvalidToken, but some services
        // need to throw other exceptions (ex. NN + StandbyException),
// so for now we'll tunnel the real exceptions via an
// InvalidToken's cause which normally is not set
if (cause.getCause() != null) {
cause = cause.getCause();
}
return cause;
}
cause = cause.getCause();
}
return e;
}
private void saslProcess(RpcSaslProto saslMessage)
throws WrappedRpcServerException, IOException, InterruptedException {
if (saslContextEstablished) {
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
new SaslException("Negotiation is already complete"));
}
RpcSaslProto saslResponse = null;
try {
try {
saslResponse = processSaslMessage(saslMessage);
} catch (IOException e) {
rpcMetrics.incrAuthenticationFailures();
if (LOG.isDebugEnabled()) {
LOG.debug(StringUtils.stringifyException(e));
}
// attempting user could be null
IOException tce = (IOException) getCauseForInvalidToken(e);
AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
+ attemptingUser + " (" + e.getLocalizedMessage()
+ ") with true cause: (" + tce.getLocalizedMessage() + ")");
throw tce;
}
if (saslServer != null && saslServer.isComplete()) {
if (LOG.isDebugEnabled()) {
LOG.debug("SASL server context established. Negotiated QoP is "
+ saslServer.getNegotiatedProperty(Sasl.QOP));
}
user = getAuthorizedUgi(saslServer.getAuthorizationID());
if (LOG.isDebugEnabled()) {
LOG.debug("SASL server successfully authenticated client: " + user);
}
rpcMetrics.incrAuthenticationSuccesses();
AUDITLOG.info(AUTH_SUCCESSFUL_FOR + user);
saslContextEstablished = true;
}
} catch (WrappedRpcServerException wrse) { // don't re-wrap
throw wrse;
} catch (IOException ioe) {
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_UNAUTHORIZED, ioe);
}
// send back response if any, may throw IOException
if (saslResponse != null) {
doSaslReply(saslResponse);
}
// do NOT enable wrapping until the last auth response is sent
if (saslContextEstablished) {
String qop = (String) saslServer.getNegotiatedProperty(Sasl.QOP);
// SASL wrapping is only used if the connection has a QOP, and
        // the value is not "auth", e.g. auth-int or auth-conf.
useWrap = (qop != null && !"auth".equalsIgnoreCase(qop));
}
}
private RpcSaslProto processSaslMessage(RpcSaslProto saslMessage)
throws IOException, InterruptedException {
final RpcSaslProto saslResponse;
final SaslState state = saslMessage.getState(); // required
switch (state) {
case NEGOTIATE: {
if (sentNegotiate) {
throw new AccessControlException(
"Client already attempted negotiation");
}
saslResponse = buildSaslNegotiateResponse();
// simple-only server negotiate response is success which client
// interprets as switch to simple
if (saslResponse.getState() == SaslState.SUCCESS) {
switchToSimple();
}
break;
}
case INITIATE: {
if (saslMessage.getAuthsCount() != 1) {
throw new SaslException("Client mechanism is malformed");
}
// verify the client requested an advertised authType
SaslAuth clientSaslAuth = saslMessage.getAuths(0);
if (!negotiateResponse.getAuthsList().contains(clientSaslAuth)) {
if (sentNegotiate) {
throw new AccessControlException(
clientSaslAuth.getMethod() + " authentication is not enabled."
+ " Available:" + enabledAuthMethods);
}
saslResponse = buildSaslNegotiateResponse();
break;
}
authMethod = AuthMethod.valueOf(clientSaslAuth.getMethod());
// abort SASL for SIMPLE auth, server has already ensured that
// SIMPLE is a legit option above. we will send no response
if (authMethod == AuthMethod.SIMPLE) {
switchToSimple();
saslResponse = null;
break;
}
// sasl server for tokens may already be instantiated
if (saslServer == null || authMethod != AuthMethod.TOKEN) {
saslServer = createSaslServer(authMethod);
}
saslResponse = processSaslToken(saslMessage);
break;
}
case RESPONSE: {
saslResponse = processSaslToken(saslMessage);
break;
}
default:
throw new SaslException("Client sent unsupported state " + state);
}
return saslResponse;
}
private RpcSaslProto processSaslToken(RpcSaslProto saslMessage)
throws SaslException {
if (!saslMessage.hasToken()) {
throw new SaslException("Client did not send a token");
}
byte[] saslToken = saslMessage.getToken().toByteArray();
if (LOG.isDebugEnabled()) {
LOG.debug("Have read input token of size " + saslToken.length
+ " for processing by saslServer.evaluateResponse()");
}
saslToken = saslServer.evaluateResponse(saslToken);
return buildSaslResponse(
saslServer.isComplete() ? SaslState.SUCCESS : SaslState.CHALLENGE,
saslToken);
}
private void switchToSimple() {
// disable SASL and blank out any SASL server
authProtocol = AuthProtocol.NONE;
saslServer = null;
}
private RpcSaslProto buildSaslResponse(SaslState state, byte[] replyToken) {
if (LOG.isDebugEnabled()) {
LOG.debug("Will send " + state + " token of size "
+ ((replyToken != null) ? replyToken.length : null)
+ " from saslServer.");
}
RpcSaslProto.Builder response = RpcSaslProto.newBuilder();
response.setState(state);
if (replyToken != null) {
response.setToken(ByteString.copyFrom(replyToken));
}
return response.build();
}
private void doSaslReply(Message message) throws IOException {
setupResponse(saslResponse, saslCall,
RpcStatusProto.SUCCESS, null,
new RpcResponseWrapper(message), null, null);
responder.doRespond(saslCall);
}
private void doSaslReply(Exception ioe) throws IOException {
setupResponse(authFailedResponse, authFailedCall,
RpcStatusProto.FATAL, RpcErrorCodeProto.FATAL_UNAUTHORIZED,
null, ioe.getClass().getName(), ioe.getLocalizedMessage());
responder.doRespond(authFailedCall);
}
private void disposeSasl() {
if (saslServer != null) {
try {
saslServer.dispose();
} catch (SaslException ignored) {
}
}
}
private void checkDataLength(int dataLength) throws IOException {
if (dataLength < 0) {
String error = "Unexpected data length " + dataLength +
"!! from " + getHostAddress();
LOG.warn(error);
throw new IOException(error);
} else if (dataLength > maxDataLength) {
String error = "Requested data length " + dataLength +
" is longer than maximum configured RPC length " +
maxDataLength + ". RPC came from " + getHostAddress();
LOG.warn(error);
throw new IOException(error);
}
}
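    // Illustrative sketch (not part of the original source): the bound
    // enforced above comes from the configured maximum data length. The
    // 128 MB value is an assumption chosen only for illustration.
    //
    //   Configuration conf = new Configuration();
    //   conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH,
    //       128 * 1024 * 1024); // single RPCs larger than this are rejected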
public int readAndProcess()
throws WrappedRpcServerException, IOException, InterruptedException {
while (true) {
/* Read at most one RPC. If the header is not read completely yet
* then iterate until we read first RPC or until there is no data left.
*/
int count = -1;
if (dataLengthBuffer.remaining() > 0) {
count = channelRead(channel, dataLengthBuffer);
if (count < 0 || dataLengthBuffer.remaining() > 0)
return count;
}
if (!connectionHeaderRead) {
//Every connection is expected to send the header.
if (connectionHeaderBuf == null) {
connectionHeaderBuf = ByteBuffer.allocate(3);
}
count = channelRead(channel, connectionHeaderBuf);
if (count < 0 || connectionHeaderBuf.remaining() > 0) {
return count;
}
int version = connectionHeaderBuf.get(0);
// TODO we should add handler for service class later
this.setServiceClass(connectionHeaderBuf.get(1));
dataLengthBuffer.flip();
// Check if it looks like the user is hitting an IPC port
// with an HTTP GET - this is a common error, so we can
// send back a simple string indicating as much.
if (HTTP_GET_BYTES.equals(dataLengthBuffer)) {
setupHttpRequestOnIpcPortResponse();
return -1;
}
if (!RpcConstants.HEADER.equals(dataLengthBuffer)
|| version != CURRENT_VERSION) {
//Warning is ok since this is not supposed to happen.
LOG.warn("Incorrect header or version mismatch from " +
hostAddress + ":" + remotePort +
" got version " + version +
" expected version " + CURRENT_VERSION);
setupBadVersionResponse(version);
return -1;
}
// this may switch us into SIMPLE
authProtocol = initializeAuthContext(connectionHeaderBuf.get(2));
dataLengthBuffer.clear();
connectionHeaderBuf = null;
connectionHeaderRead = true;
continue;
}
if (data == null) {
dataLengthBuffer.flip();
dataLength = dataLengthBuffer.getInt();
checkDataLength(dataLength);
data = ByteBuffer.allocate(dataLength);
}
count = channelRead(channel, data);
if (data.remaining() == 0) {
dataLengthBuffer.clear();
data.flip();
boolean isHeaderRead = connectionContextRead;
processOneRpc(data.array());
data = null;
if (!isHeaderRead) {
continue;
}
}
return count;
}
}
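    // For reference, a sketch of the connection preamble consumed by
    // readAndProcess() above, derived from this code (not an authoritative
    // wire specification):
    //
    //   4 bytes  RpcConstants.HEADER  -- the "hrpc" magic
    //   1 byte   version              -- must equal CURRENT_VERSION
    //   1 byte   service class
    //   1 byte   auth protocol        -- NONE or SASL
    //
    // followed by repeated frames of [4-byte length][length bytes of data].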
private AuthProtocol initializeAuthContext(int authType)
throws IOException {
AuthProtocol authProtocol = AuthProtocol.valueOf(authType);
if (authProtocol == null) {
IOException ioe = new IpcException("Unknown auth protocol:" + authType);
doSaslReply(ioe);
throw ioe;
}
boolean isSimpleEnabled = enabledAuthMethods.contains(AuthMethod.SIMPLE);
switch (authProtocol) {
case NONE: {
// don't reply if client is simple and server is insecure
if (!isSimpleEnabled) {
IOException ioe = new AccessControlException(
"SIMPLE authentication is not enabled."
+ " Available:" + enabledAuthMethods);
doSaslReply(ioe);
throw ioe;
}
break;
}
default: {
break;
}
}
return authProtocol;
}
private RpcSaslProto buildSaslNegotiateResponse()
throws IOException, InterruptedException {
RpcSaslProto negotiateMessage = negotiateResponse;
// accelerate token negotiation by sending initial challenge
// in the negotiation response
if (enabledAuthMethods.contains(AuthMethod.TOKEN)) {
saslServer = createSaslServer(AuthMethod.TOKEN);
byte[] challenge = saslServer.evaluateResponse(new byte[0]);
RpcSaslProto.Builder negotiateBuilder =
RpcSaslProto.newBuilder(negotiateResponse);
negotiateBuilder.getAuthsBuilder(0) // TOKEN is always first
.setChallenge(ByteString.copyFrom(challenge));
negotiateMessage = negotiateBuilder.build();
}
sentNegotiate = true;
return negotiateMessage;
}
private SaslServer createSaslServer(AuthMethod authMethod)
throws IOException, InterruptedException {
final Map<String,?> saslProps =
saslPropsResolver.getServerProperties(addr);
return new SaslRpcServer(authMethod).create(this, saslProps, secretManager);
}
/**
* Try to set up the response to indicate that the client version
* is incompatible with the server. This can contain special-case
* code to speak enough of past IPC protocols to pass back
* an exception to the caller.
* @param clientVersion the version the caller is using
* @throws IOException
*/
private void setupBadVersionResponse(int clientVersion) throws IOException {
String errMsg = "Server IPC version " + CURRENT_VERSION +
" cannot communicate with client version " + clientVersion;
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
if (clientVersion >= 9) {
      // Versions >= 9 understand the normal response
Call fakeCall = new Call(-1, RpcConstants.INVALID_RETRY_COUNT, null,
this);
setupResponse(buffer, fakeCall,
RpcStatusProto.FATAL, RpcErrorCodeProto.FATAL_VERSION_MISMATCH,
null, VersionMismatch.class.getName(), errMsg);
responder.doRespond(fakeCall);
} else if (clientVersion >= 3) {
Call fakeCall = new Call(-1, RpcConstants.INVALID_RETRY_COUNT, null,
this);
// Versions 3 to 8 use older response
setupResponseOldVersionFatal(buffer, fakeCall,
null, VersionMismatch.class.getName(), errMsg);
responder.doRespond(fakeCall);
} else if (clientVersion == 2) { // Hadoop 0.18.3
Call fakeCall = new Call(0, RpcConstants.INVALID_RETRY_COUNT, null,
this);
DataOutputStream out = new DataOutputStream(buffer);
out.writeInt(0); // call ID
out.writeBoolean(true); // error
WritableUtils.writeString(out, VersionMismatch.class.getName());
WritableUtils.writeString(out, errMsg);
fakeCall.setResponse(ByteBuffer.wrap(buffer.toByteArray()));
responder.doRespond(fakeCall);
}
}
private void setupHttpRequestOnIpcPortResponse() throws IOException {
Call fakeCall = new Call(0, RpcConstants.INVALID_RETRY_COUNT, null, this);
fakeCall.setResponse(ByteBuffer.wrap(
RECEIVED_HTTP_REQ_RESPONSE.getBytes(Charsets.UTF_8)));
responder.doRespond(fakeCall);
}
/** Reads the connection context following the connection header
* @param dis - DataInputStream from which to read the header
* @throws WrappedRpcServerException - if the header cannot be
* deserialized, or the user is not authorized
*/
private void processConnectionContext(DataInputStream dis)
throws WrappedRpcServerException {
// allow only one connection context during a session
if (connectionContextRead) {
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
"Connection context already processed");
}
connectionContext = decodeProtobufFromStream(
IpcConnectionContextProto.newBuilder(), dis);
protocolName = connectionContext.hasProtocol() ? connectionContext
.getProtocol() : null;
UserGroupInformation protocolUser = ProtoUtil.getUgi(connectionContext);
if (saslServer == null) {
user = protocolUser;
} else {
// user is authenticated
user.setAuthenticationMethod(authMethod);
//Now we check if this is a proxy user case. If the protocol user is
//different from the 'user', it is a proxy user scenario. However,
//this is not allowed if user authenticated with DIGEST.
if ((protocolUser != null)
&& (!protocolUser.getUserName().equals(user.getUserName()))) {
if (authMethod == AuthMethod.TOKEN) {
// Not allowed to doAs if token authentication is used
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_UNAUTHORIZED,
new AccessControlException("Authenticated user (" + user
+ ") doesn't match what the client claims to be ("
+ protocolUser + ")"));
} else {
// Effective user can be different from authenticated user
// for simple auth or kerberos auth
// The user is the real user. Now we create a proxy user
UserGroupInformation realUser = user;
user = UserGroupInformation.createProxyUser(protocolUser
.getUserName(), realUser);
}
}
}
authorizeConnection();
// don't set until after authz because connection isn't established
connectionContextRead = true;
}
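    // Illustrative sketch (not part of the original source): the proxy-user
    // branch above corresponds to superuser configuration like the following;
    // the user names "oozie" and "alice" are hypothetical examples.
    //
    //   conf.set("hadoop.proxyuser.oozie.hosts", "host1,host2");
    //   conf.set("hadoop.proxyuser.oozie.groups", "group1");
    //   UserGroupInformation proxyUgi =
    //       UserGroupInformation.createProxyUser("alice", realUserUgi);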
/**
* Process a wrapped RPC Request - unwrap the SASL packet and process
* each embedded RPC request
* @param buf - SASL wrapped request of one or more RPCs
* @throws IOException - SASL packet cannot be unwrapped
* @throws InterruptedException
*/
private void unwrapPacketAndProcessRpcs(byte[] inBuf)
throws WrappedRpcServerException, IOException, InterruptedException {
if (LOG.isDebugEnabled()) {
LOG.debug("Have read input token of size " + inBuf.length
+ " for processing by saslServer.unwrap()");
}
inBuf = saslServer.unwrap(inBuf, 0, inBuf.length);
ReadableByteChannel ch = Channels.newChannel(new ByteArrayInputStream(
inBuf));
// Read all RPCs contained in the inBuf, even partial ones
while (true) {
int count = -1;
if (unwrappedDataLengthBuffer.remaining() > 0) {
count = channelRead(ch, unwrappedDataLengthBuffer);
if (count <= 0 || unwrappedDataLengthBuffer.remaining() > 0)
return;
}
if (unwrappedData == null) {
unwrappedDataLengthBuffer.flip();
int unwrappedDataLength = unwrappedDataLengthBuffer.getInt();
unwrappedData = ByteBuffer.allocate(unwrappedDataLength);
}
count = channelRead(ch, unwrappedData);
if (count <= 0 || unwrappedData.remaining() > 0)
return;
if (unwrappedData.remaining() == 0) {
unwrappedDataLengthBuffer.clear();
unwrappedData.flip();
processOneRpc(unwrappedData.array());
unwrappedData = null;
}
}
}
/**
* Process an RPC Request - handle connection setup and decoding of
* request into a Call
* @param buf - contains the RPC request header and the rpc request
* @throws IOException - internal error that should not be returned to
* client, typically failure to respond to client
* @throws WrappedRpcServerException - an exception to be sent back to
* the client that does not require verbose logging by the
* Listener thread
* @throws InterruptedException
*/
private void processOneRpc(byte[] buf)
throws IOException, WrappedRpcServerException, InterruptedException {
int callId = -1;
int retry = RpcConstants.INVALID_RETRY_COUNT;
try {
final DataInputStream dis =
new DataInputStream(new ByteArrayInputStream(buf));
final RpcRequestHeaderProto header =
decodeProtobufFromStream(RpcRequestHeaderProto.newBuilder(), dis);
callId = header.getCallId();
retry = header.getRetryCount();
if (LOG.isDebugEnabled()) {
LOG.debug(" got #" + callId);
}
checkRpcHeaders(header);
if (callId < 0) { // callIds typically used during connection setup
processRpcOutOfBandRequest(header, dis);
} else if (!connectionContextRead) {
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
"Connection context not established");
} else {
processRpcRequest(header, dis);
}
} catch (WrappedRpcServerException wrse) { // inform client of error
Throwable ioe = wrse.getCause();
final Call call = new Call(callId, retry, null, this);
setupResponse(authFailedResponse, call,
RpcStatusProto.FATAL, wrse.getRpcErrorCodeProto(), null,
ioe.getClass().getName(), ioe.getMessage());
responder.doRespond(call);
throw wrse;
}
}
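    // For reference, the layout of one RPC as decoded above, derived from
    // this code (not an authoritative wire specification):
    //
    //   varint-delimited RpcRequestHeaderProto  -- callId, retryCount,
    //                                              rpcKind, clientId, trace
    //   rpc request payload for that rpcKind
    //
    // Negative callIds mark out-of-band calls: connection context, SASL
    // negotiation, and pings.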
/**
* Verify RPC header is valid
* @param header - RPC request header
* @throws WrappedRpcServerException - header contains invalid values
*/
private void checkRpcHeaders(RpcRequestHeaderProto header)
throws WrappedRpcServerException {
if (!header.hasRpcOp()) {
String err = " IPC Server: No rpc op in rpcRequestHeader";
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER, err);
}
if (header.getRpcOp() !=
RpcRequestHeaderProto.OperationProto.RPC_FINAL_PACKET) {
String err = "IPC Server does not implement rpc header operation" +
header.getRpcOp();
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER, err);
}
// If we know the rpc kind, get its class so that we can deserialize
    // (Note it would make more sense to have the handler deserialize, but
    // we continue with this original design.)
if (!header.hasRpcKind()) {
String err = " IPC Server: No rpc kind in rpcRequestHeader";
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER, err);
}
}
/**
* Process an RPC Request - the connection headers and context must
* have been already read
* @param header - RPC request header
* @param dis - stream to request payload
* @throws WrappedRpcServerException - due to fatal rpc layer issues such
* as invalid header or deserialization error. In this case a RPC fatal
* status response will later be sent back to client.
* @throws InterruptedException
*/
private void processRpcRequest(RpcRequestHeaderProto header,
DataInputStream dis) throws WrappedRpcServerException,
InterruptedException {
Class<? extends Writable> rpcRequestClass =
getRpcRequestWrapper(header.getRpcKind());
if (rpcRequestClass == null) {
LOG.warn("Unknown rpc kind " + header.getRpcKind() +
" from client " + getHostAddress());
final String err = "Unknown rpc kind in rpc header" +
header.getRpcKind();
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER, err);
}
Writable rpcRequest;
try { //Read the rpc request
rpcRequest = ReflectionUtils.newInstance(rpcRequestClass, conf);
rpcRequest.readFields(dis);
} catch (Throwable t) { // includes runtime exception from newInstance
LOG.warn("Unable to read call parameters for client " +
getHostAddress() + "on connection protocol " +
this.protocolName + " for rpcKind " + header.getRpcKind(), t);
String err = "IPC server unable to read call parameters: "+ t.getMessage();
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_DESERIALIZING_REQUEST, err);
}
Span traceSpan = null;
if (header.hasTraceInfo()) {
// If the incoming RPC included tracing info, always continue the trace
TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
header.getTraceInfo().getParentId());
traceSpan = Trace.startSpan(
RpcClientUtil.toTraceName(rpcRequest.toString()),
parentSpan).detach();
}
Call call = new Call(header.getCallId(), header.getRetryCount(),
rpcRequest, this, ProtoUtil.convert(header.getRpcKind()),
header.getClientId().toByteArray(), traceSpan);
if (callQueue.isClientBackoffEnabled()) {
// if RPC queue is full, we will ask the RPC client to back off by
// throwing RetriableException. Whether RPC client will honor
// RetriableException and retry depends on client ipc retry policy.
// For example, FailoverOnNetworkExceptionRetry handles
// RetriableException.
queueRequestOrAskClientToBackOff(call);
} else {
callQueue.put(call); // queue the call; maybe blocked here
}
incRpcCount(); // Increment the rpc count
}
private void queueRequestOrAskClientToBackOff(Call call)
throws WrappedRpcServerException, InterruptedException {
// If rpc queue is full, we will ask the client to back off.
boolean isCallQueued = callQueue.offer(call);
if (!isCallQueued) {
rpcMetrics.incrClientBackoff();
RetriableException retriableException =
new RetriableException("Server is too busy.");
throw new WrappedRpcServerException(
RpcErrorCodeProto.ERROR_RPC_SERVER, retriableException);
}
}
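    // Illustrative sketch (not part of the original source): client backoff
    // is switched on per server port through the call queue configuration.
    // The port-qualified key below is an assumption for illustration, using
    // port 8020 as a hypothetical example.
    //
    //   conf.setBoolean("ipc.8020.backoff.enable", true);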
/**
* Establish RPC connection setup by negotiating SASL if required, then
* reading and authorizing the connection header
* @param header - RPC header
* @param dis - stream to request payload
* @throws WrappedRpcServerException - setup failed due to SASL
* negotiation failure, premature or invalid connection context,
* or other state errors
* @throws IOException - failed to send a response back to the client
* @throws InterruptedException
*/
private void processRpcOutOfBandRequest(RpcRequestHeaderProto header,
DataInputStream dis) throws WrappedRpcServerException, IOException,
InterruptedException {
final int callId = header.getCallId();
if (callId == CONNECTION_CONTEXT_CALL_ID) {
// SASL must be established prior to connection context
if (authProtocol == AuthProtocol.SASL && !saslContextEstablished) {
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
"Connection header sent during SASL negotiation");
}
// read and authorize the user
processConnectionContext(dis);
} else if (callId == AuthProtocol.SASL.callId) {
// if client was switched to simple, ignore first SASL message
if (authProtocol != AuthProtocol.SASL) {
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
"SASL protocol not requested by client");
}
saslReadAndProcess(dis);
} else if (callId == PING_CALL_ID) {
LOG.debug("Received ping message");
} else {
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
"Unknown out of band call #" + callId);
}
}
/**
* Authorize proxy users to access this server
* @throws WrappedRpcServerException - user is not allowed to proxy
*/
private void authorizeConnection() throws WrappedRpcServerException {
try {
// If auth method is TOKEN, the token was obtained by the
// real user for the effective user, therefore not required to
// authorize real user. doAs is allowed only for simple or kerberos
// authentication
if (user != null && user.getRealUser() != null
&& (authMethod != AuthMethod.TOKEN)) {
ProxyUsers.authorize(user, this.getHostAddress());
}
authorize(user, protocolName, getHostInetAddress());
if (LOG.isDebugEnabled()) {
LOG.debug("Successfully authorized " + connectionContext);
}
rpcMetrics.incrAuthorizationSuccesses();
} catch (AuthorizationException ae) {
LOG.info("Connection from " + this
+ " for protocol " + connectionContext.getProtocol()
+ " is unauthorized for user " + user);
rpcMetrics.incrAuthorizationFailures();
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_UNAUTHORIZED, ae);
}
}
/**
     * Decode a protobuf from the given input stream
* @param builder - Builder of the protobuf to decode
* @param dis - DataInputStream to read the protobuf
* @return Message - decoded protobuf
* @throws WrappedRpcServerException - deserialization failed
*/
@SuppressWarnings("unchecked")
private <T extends Message> T decodeProtobufFromStream(Builder builder,
DataInputStream dis) throws WrappedRpcServerException {
try {
builder.mergeDelimitedFrom(dis);
return (T)builder.build();
} catch (Exception ioe) {
Class<?> protoClass = builder.getDefaultInstanceForType().getClass();
throw new WrappedRpcServerException(
RpcErrorCodeProto.FATAL_DESERIALIZING_REQUEST,
"Error decoding " + protoClass.getSimpleName() + ": "+ ioe);
}
}
/**
* Get service class for connection
* @return the serviceClass
*/
public int getServiceClass() {
return serviceClass;
}
/**
* Set service class for connection
* @param serviceClass the serviceClass to set
*/
public void setServiceClass(int serviceClass) {
this.serviceClass = serviceClass;
}
private synchronized void close() {
disposeSasl();
data = null;
dataLengthBuffer = null;
if (!channel.isOpen())
return;
try {socket.shutdownOutput();} catch(Exception e) {
LOG.debug("Ignoring socket shutdown exception", e);
}
if (channel.isOpen()) {
IOUtils.cleanup(null, channel);
}
IOUtils.cleanup(null, socket);
}
}
  /** Handles queued calls. */
private class Handler extends Thread {
public Handler(int instanceNumber) {
this.setDaemon(true);
this.setName("IPC Server handler "+ instanceNumber + " on " + port);
}
@Override
public void run() {
LOG.debug(Thread.currentThread().getName() + ": starting");
SERVER.set(Server.this);
ByteArrayOutputStream buf =
new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE);
while (running) {
TraceScope traceScope = null;
try {
final Call call = callQueue.take(); // pop the queue; maybe blocked here
if (LOG.isDebugEnabled()) {
LOG.debug(Thread.currentThread().getName() + ": " + call + " for RpcKind " + call.rpcKind);
}
if (!call.connection.channel.isOpen()) {
LOG.info(Thread.currentThread().getName() + ": skipped " + call);
continue;
}
String errorClass = null;
String error = null;
RpcStatusProto returnStatus = RpcStatusProto.SUCCESS;
RpcErrorCodeProto detailedErr = null;
Writable value = null;
CurCall.set(call);
if (call.traceSpan != null) {
traceScope = Trace.continueSpan(call.traceSpan);
traceScope.getSpan().addTimelineAnnotation("called");
}
try {
// Make the call as the user via Subject.doAs, thus associating
// the call with the Subject
if (call.connection.user == null) {
value = call(call.rpcKind, call.connection.protocolName, call.rpcRequest,
call.timestamp);
} else {
value =
call.connection.user.doAs
(new PrivilegedExceptionAction<Writable>() {
@Override
public Writable run() throws Exception {
// make the call
return call(call.rpcKind, call.connection.protocolName,
call.rpcRequest, call.timestamp);
}
}
);
}
} catch (Throwable e) {
if (e instanceof UndeclaredThrowableException) {
e = e.getCause();
}
String logMsg = Thread.currentThread().getName() + ", call " + call;
if (exceptionsHandler.isTerse(e.getClass())) {
// Don't log the whole stack trace. Way too noisy!
LOG.info(logMsg + ": " + e);
} else if (e instanceof RuntimeException || e instanceof Error) {
// These exception types indicate something is probably wrong
// on the server side, as opposed to just a normal exceptional
// result.
LOG.warn(logMsg, e);
} else {
LOG.info(logMsg, e);
}
if (e instanceof RpcServerException) {
RpcServerException rse = ((RpcServerException)e);
returnStatus = rse.getRpcStatusProto();
detailedErr = rse.getRpcErrorCodeProto();
} else {
returnStatus = RpcStatusProto.ERROR;
detailedErr = RpcErrorCodeProto.ERROR_APPLICATION;
}
errorClass = e.getClass().getName();
error = StringUtils.stringifyException(e);
// Remove redundant error class name from the beginning of the stack trace
String exceptionHdr = errorClass + ": ";
if (error.startsWith(exceptionHdr)) {
error = error.substring(exceptionHdr.length());
}
}
CurCall.set(null);
synchronized (call.connection.responseQueue) {
// setupResponse() needs to be sync'ed together with
// responder.doResponse() since setupResponse may use
// SASL to encrypt response data and SASL enforces
// its own message ordering.
setupResponse(buf, call, returnStatus, detailedErr,
value, errorClass, error);
// Discard the large buf and reset it back to smaller size
// to free up heap
if (buf.size() > maxRespSize) {
LOG.warn("Large response size " + buf.size() + " for call "
+ call.toString());
buf = new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE);
}
responder.doRespond(call);
}
} catch (InterruptedException e) {
if (running) { // unexpected -- log it
LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
            if (traceScope != null && Trace.isTracing()) {
traceScope.getSpan().addTimelineAnnotation("unexpectedly interrupted: " +
StringUtils.stringifyException(e));
}
}
} catch (Exception e) {
LOG.info(Thread.currentThread().getName() + " caught an exception", e);
          if (traceScope != null && Trace.isTracing()) {
traceScope.getSpan().addTimelineAnnotation("Exception: " +
StringUtils.stringifyException(e));
}
} finally {
if (traceScope != null) {
traceScope.close();
}
IOUtils.cleanup(LOG, traceScope);
}
}
LOG.debug(Thread.currentThread().getName() + ": exiting");
}
}
protected Server(String bindAddress, int port,
Class<? extends Writable> paramClass, int handlerCount,
Configuration conf)
throws IOException
{
this(bindAddress, port, paramClass, handlerCount, -1, -1, conf, Integer
.toString(port), null, null);
}
protected Server(String bindAddress, int port,
Class<? extends Writable> rpcRequestClass, int handlerCount,
int numReaders, int queueSizePerHandler, Configuration conf,
String serverName, SecretManager<? extends TokenIdentifier> secretManager)
throws IOException {
this(bindAddress, port, rpcRequestClass, handlerCount, numReaders,
queueSizePerHandler, conf, serverName, secretManager, null);
}
/**
* Constructs a server listening on the named port and address. Parameters passed must
   * be of the named class. The <code>handlerCount</code> determines
* the number of handler threads that will be used to process calls.
* If queueSizePerHandler or numReaders are not -1 they will be used instead of parameters
* from configuration. Otherwise the configuration will be picked up.
*
   * If rpcRequestClass is null then the rpc request class must have been
   * registered via {@link #registerProtocolEngine(RpcPayloadHeader.RpcKind,
   * Class, RPC.RpcInvoker)}.
* This parameter has been retained for compatibility with existing tests
* and usage.
*/
@SuppressWarnings("unchecked")
protected Server(String bindAddress, int port,
Class<? extends Writable> rpcRequestClass, int handlerCount,
int numReaders, int queueSizePerHandler, Configuration conf,
String serverName, SecretManager<? extends TokenIdentifier> secretManager,
String portRangeConfig)
throws IOException {
this.bindAddress = bindAddress;
this.conf = conf;
this.portRangeConfig = portRangeConfig;
this.port = port;
this.rpcRequestClass = rpcRequestClass;
this.handlerCount = handlerCount;
this.socketSendBufferSize = 0;
this.maxDataLength = conf.getInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH,
CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
if (queueSizePerHandler != -1) {
this.maxQueueSize = queueSizePerHandler;
} else {
this.maxQueueSize = handlerCount * conf.getInt(
CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_KEY,
CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT);
}
this.maxRespSize = conf.getInt(
CommonConfigurationKeys.IPC_SERVER_RPC_MAX_RESPONSE_SIZE_KEY,
CommonConfigurationKeys.IPC_SERVER_RPC_MAX_RESPONSE_SIZE_DEFAULT);
if (numReaders != -1) {
this.readThreads = numReaders;
} else {
this.readThreads = conf.getInt(
CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY,
CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_DEFAULT);
}
this.readerPendingConnectionQueue = conf.getInt(
CommonConfigurationKeys.IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_KEY,
CommonConfigurationKeys.IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_DEFAULT);
// Setup appropriate callqueue
final String prefix = getQueueClassPrefix();
this.callQueue = new CallQueueManager<Call>(getQueueClass(prefix, conf),
getClientBackoffEnable(prefix, conf), maxQueueSize, prefix, conf);
this.secretManager = (SecretManager<TokenIdentifier>) secretManager;
this.authorize =
conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
false);
// configure supported authentications
this.enabledAuthMethods = getAuthMethods(secretManager, conf);
this.negotiateResponse = buildNegotiateResponse(enabledAuthMethods);
// Start the listener here and let it bind to the port
listener = new Listener();
this.port = listener.getAddress().getPort();
connectionManager = new ConnectionManager();
this.rpcMetrics = RpcMetrics.create(this, conf);
this.rpcDetailedMetrics = RpcDetailedMetrics.create(this.port);
this.tcpNoDelay = conf.getBoolean(
CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY,
CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_DEFAULT);
// Create the responder here
responder = new Responder();
if (secretManager != null || UserGroupInformation.isSecurityEnabled()) {
SaslRpcServer.init(conf);
saslPropsResolver = SaslPropertiesResolver.getInstance(conf);
}
this.exceptionsHandler.addTerseExceptions(StandbyException.class);
}
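  // Illustrative sketch (not part of the original source): servers are
  // normally assembled through RPC.Builder rather than by calling this
  // constructor directly. The protocol class and service instance names
  // below are hypothetical.
  //
  //   Server server = new RPC.Builder(conf)
  //       .setProtocol(MyProtocolPB.class)
  //       .setInstance(myBlockingService)
  //       .setBindAddress("0.0.0.0")
  //       .setPort(8020)
  //       .setNumHandlers(10)
  //       .build();
  //   server.start();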
private RpcSaslProto buildNegotiateResponse(List<AuthMethod> authMethods)
throws IOException {
RpcSaslProto.Builder negotiateBuilder = RpcSaslProto.newBuilder();
if (authMethods.contains(AuthMethod.SIMPLE) && authMethods.size() == 1) {
// SIMPLE-only servers return success in response to negotiate
negotiateBuilder.setState(SaslState.SUCCESS);
} else {
negotiateBuilder.setState(SaslState.NEGOTIATE);
for (AuthMethod authMethod : authMethods) {
SaslRpcServer saslRpcServer = new SaslRpcServer(authMethod);
SaslAuth.Builder builder = negotiateBuilder.addAuthsBuilder()
.setMethod(authMethod.toString())
.setMechanism(saslRpcServer.mechanism);
if (saslRpcServer.protocol != null) {
builder.setProtocol(saslRpcServer.protocol);
}
if (saslRpcServer.serverId != null) {
builder.setServerId(saslRpcServer.serverId);
}
}
}
return negotiateBuilder.build();
}
// get the security type from the conf. implicitly include token support
// if a secret manager is provided, or fail if token is the conf value but
// there is no secret manager
private List<AuthMethod> getAuthMethods(SecretManager<?> secretManager,
Configuration conf) {
AuthenticationMethod confAuthenticationMethod =
SecurityUtil.getAuthenticationMethod(conf);
List<AuthMethod> authMethods = new ArrayList<AuthMethod>();
if (confAuthenticationMethod == AuthenticationMethod.TOKEN) {
if (secretManager == null) {
throw new IllegalArgumentException(AuthenticationMethod.TOKEN +
" authentication requires a secret manager");
}
} else if (secretManager != null) {
LOG.debug(AuthenticationMethod.TOKEN +
" authentication enabled for secret manager");
// most preferred, go to the front of the line!
authMethods.add(AuthenticationMethod.TOKEN.getAuthMethod());
}
authMethods.add(confAuthenticationMethod.getAuthMethod());
LOG.debug("Server accepts auth methods:" + authMethods);
return authMethods;
}
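  // Illustrative sketch (not part of the original source): the configured
  // authentication method consulted above is read from the standard security
  // key; the value shown is one of the accepted options.
  //
  //   conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
  //       "kerberos"); // or "simple" / "token"
  //   // a non-null secret manager additionally advertises TOKEN first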
private void closeConnection(Connection connection) {
connectionManager.close(connection);
}
/**
* Setup response for the IPC Call.
*
* @param responseBuf buffer to serialize the response into
* @param call {@link Call} to which we are setting up the response
   * @param status status of the IPC call
   * @param erCode detailed error code, if the call failed
   * @param rv return value for the IPC Call, if the call was successful
   * @param errorClass error class, if the call failed
   * @param error error message, if the call failed
* @throws IOException
*/
private void setupResponse(ByteArrayOutputStream responseBuf,
Call call, RpcStatusProto status, RpcErrorCodeProto erCode,
Writable rv, String errorClass, String error)
throws IOException {
responseBuf.reset();
DataOutputStream out = new DataOutputStream(responseBuf);
RpcResponseHeaderProto.Builder headerBuilder =
RpcResponseHeaderProto.newBuilder();
headerBuilder.setClientId(ByteString.copyFrom(call.clientId));
headerBuilder.setCallId(call.callId);
headerBuilder.setRetryCount(call.retryCount);
headerBuilder.setStatus(status);
headerBuilder.setServerIpcVersionNum(CURRENT_VERSION);
if (status == RpcStatusProto.SUCCESS) {
RpcResponseHeaderProto header = headerBuilder.build();
final int headerLen = header.getSerializedSize();
int fullLength = CodedOutputStream.computeRawVarint32Size(headerLen) +
headerLen;
try {
if (rv instanceof ProtobufRpcEngine.RpcWrapper) {
ProtobufRpcEngine.RpcWrapper resWrapper =
(ProtobufRpcEngine.RpcWrapper) rv;
fullLength += resWrapper.getLength();
out.writeInt(fullLength);
header.writeDelimitedTo(out);
rv.write(out);
} else { // Have to serialize to buffer to get len
final DataOutputBuffer buf = new DataOutputBuffer();
rv.write(buf);
byte[] data = buf.getData();
fullLength += buf.getLength();
out.writeInt(fullLength);
header.writeDelimitedTo(out);
out.write(data, 0, buf.getLength());
}
} catch (Throwable t) {
LOG.warn("Error serializing call response for call " + call, t);
// Call back to same function - this is OK since the
// buffer is reset at the top, and since status is changed
// to ERROR it won't infinite loop.
setupResponse(responseBuf, call, RpcStatusProto.ERROR,
RpcErrorCodeProto.ERROR_SERIALIZING_RESPONSE,
null, t.getClass().getName(),
StringUtils.stringifyException(t));
return;
}
} else { // Rpc Failure
headerBuilder.setExceptionClassName(errorClass);
headerBuilder.setErrorMsg(error);
headerBuilder.setErrorDetail(erCode);
RpcResponseHeaderProto header = headerBuilder.build();
int headerLen = header.getSerializedSize();
final int fullLength =
CodedOutputStream.computeRawVarint32Size(headerLen) + headerLen;
out.writeInt(fullLength);
header.writeDelimitedTo(out);
}
if (call.connection.useWrap) {
wrapWithSasl(responseBuf, call);
}
call.setResponse(ByteBuffer.wrap(responseBuf.toByteArray()));
}
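  // For reference, the response layout produced above, derived from this
  // code (not an authoritative wire specification):
  //
  //   4-byte total length
  //   varint-delimited RpcResponseHeaderProto  -- callId, status, retryCount,
  //                                               error details on failure
  //   response payload (present only when status == SUCCESS)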
/**
* Setup response for the IPC Call on Fatal Error from a
* client that is using old version of Hadoop.
* The response is serialized using the previous protocol's response
* layout.
*
* @param response buffer to serialize the response into
* @param call {@link Call} to which we are setting up the response
* @param rv return value for the IPC Call, if the call was successful
   * @param errorClass error class, if the call failed
* @param error error message, if the call failed
* @throws IOException
*/
private void setupResponseOldVersionFatal(ByteArrayOutputStream response,
Call call,
Writable rv, String errorClass, String error)
throws IOException {
final int OLD_VERSION_FATAL_STATUS = -1;
response.reset();
DataOutputStream out = new DataOutputStream(response);
out.writeInt(call.callId); // write call id
out.writeInt(OLD_VERSION_FATAL_STATUS); // write FATAL_STATUS
WritableUtils.writeString(out, errorClass);
WritableUtils.writeString(out, error);
if (call.connection.useWrap) {
wrapWithSasl(response, call);
}
call.setResponse(ByteBuffer.wrap(response.toByteArray()));
}
private void wrapWithSasl(ByteArrayOutputStream response, Call call)
throws IOException {
if (call.connection.saslServer != null) {
byte[] token = response.toByteArray();
// synchronization may be needed since there can be multiple Handler
// threads using saslServer to wrap responses.
synchronized (call.connection.saslServer) {
token = call.connection.saslServer.wrap(token, 0, token.length);
}
if (LOG.isDebugEnabled())
LOG.debug("Adding saslServer wrapped token of size " + token.length
+ " as call response.");
response.reset();
// rebuild with sasl header and payload
RpcResponseHeaderProto saslHeader = RpcResponseHeaderProto.newBuilder()
.setCallId(AuthProtocol.SASL.callId)
.setStatus(RpcStatusProto.SUCCESS)
.build();
RpcSaslProto saslMessage = RpcSaslProto.newBuilder()
.setState(SaslState.WRAP)
.setToken(ByteString.copyFrom(token, 0, token.length))
.build();
RpcResponseMessageWrapper saslResponse =
new RpcResponseMessageWrapper(saslHeader, saslMessage);
DataOutputStream out = new DataOutputStream(response);
out.writeInt(saslResponse.getLength());
saslResponse.write(out);
}
}
Configuration getConf() {
return conf;
}
/** Sets the socket buffer size used for responding to RPCs */
public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; }
/** Starts the service. Must be called before any calls will be handled. */
public synchronized void start() {
responder.start();
listener.start();
handlers = new Handler[handlerCount];
for (int i = 0; i < handlerCount; i++) {
handlers[i] = new Handler(i);
handlers[i].start();
}
}
/** Stops the service. No new calls will be handled after this is called. */
public synchronized void stop() {
LOG.info("Stopping server on " + port);
running = false;
if (handlers != null) {
for (int i = 0; i < handlerCount; i++) {
if (handlers[i] != null) {
handlers[i].interrupt();
}
}
}
listener.interrupt();
listener.doStop();
responder.interrupt();
notifyAll();
this.rpcMetrics.shutdown();
this.rpcDetailedMetrics.shutdown();
}
/** Wait for the server to be stopped.
* Does not wait for all subthreads to finish.
* See {@link #stop()}.
*/
public synchronized void join() throws InterruptedException {
while (running) {
wait();
}
}
/**
   * Return the socket (ip+port) on which the RPC server is listening.
   * @return the socket (ip+port) on which the RPC server is listening.
*/
public synchronized InetSocketAddress getListenerAddress() {
return listener.getAddress();
}
/**
* Called for each call.
* @deprecated Use {@link #call(RpcPayloadHeader.RpcKind, String,
* Writable, long)} instead
*/
@Deprecated
public Writable call(Writable param, long receiveTime) throws Exception {
return call(RPC.RpcKind.RPC_BUILTIN, null, param, receiveTime);
}
/** Called for each call. */
public abstract Writable call(RPC.RpcKind rpcKind, String protocol,
Writable param, long receiveTime) throws Exception;
/**
* Authorize the incoming client connection.
*
* @param user client user
* @param protocolName - the protocol
* @param addr InetAddress of incoming connection
* @throws AuthorizationException when the client isn't authorized to talk the protocol
*/
private void authorize(UserGroupInformation user, String protocolName,
InetAddress addr) throws AuthorizationException {
if (authorize) {
if (protocolName == null) {
throw new AuthorizationException("Null protocol not authorized");
}
Class<?> protocol = null;
try {
protocol = getProtocolClass(protocolName, getConf());
} catch (ClassNotFoundException cfne) {
throw new AuthorizationException("Unknown protocol: " +
protocolName);
}
serviceAuthorizationManager.authorize(user, protocol, getConf(), addr);
}
}
/**
* Get the port on which the IPC Server is listening for incoming connections.
* This could be an ephemeral port too, in which case we return the real
* port on which the Server has bound.
* @return port on which IPC Server is listening
*/
public int getPort() {
return port;
}
/**
   * The number of open RPC connections
* @return the number of open rpc connections
*/
public int getNumOpenConnections() {
return connectionManager.size();
}
/**
* The number of rpc calls in the queue.
* @return The number of rpc calls in the queue.
*/
public int getCallQueueLen() {
return callQueue.size();
}
/**
* The maximum size of the rpc call queue of this server.
* @return The maximum size of the rpc call queue.
*/
public int getMaxQueueSize() {
return maxQueueSize;
}
/**
* The number of reader threads for this server.
* @return The number of reader threads.
*/
public int getNumReaders() {
return readThreads;
}
/**
* When the read or write buffer size is larger than this limit, i/o will be
   * done in chunks of this size. Most RPC requests and responses would
   * be smaller.
*/
private static int NIO_BUFFER_LIMIT = 8*1024; //should not be more than 64KB.
/**
* This is a wrapper around {@link WritableByteChannel#write(ByteBuffer)}.
* If the amount of data is large, it writes to channel in smaller chunks.
   * This is to prevent the JDK from creating many direct buffers as the
   * size of the buffer increases. This also minimizes extra copies in NIO layer
* as a result of multiple write operations required to write a large
* buffer.
*
* @see WritableByteChannel#write(ByteBuffer)
*/
private int channelWrite(WritableByteChannel channel,
ByteBuffer buffer) throws IOException {
int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
channel.write(buffer) : channelIO(null, channel, buffer);
if (count > 0) {
rpcMetrics.incrSentBytes(count);
}
return count;
}
/**
* This is a wrapper around {@link ReadableByteChannel#read(ByteBuffer)}.
   * If the amount of data is large, it reads from the channel in smaller chunks.
   * This is to prevent the JDK from creating many direct buffers as the size of
   * ByteBuffer increases. There should not be any performance degradation.
*
* @see ReadableByteChannel#read(ByteBuffer)
*/
private int channelRead(ReadableByteChannel channel,
ByteBuffer buffer) throws IOException {
int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
channel.read(buffer) : channelIO(channel, null, buffer);
if (count > 0) {
rpcMetrics.incrReceivedBytes(count);
}
return count;
}
/**
* Helper for {@link #channelRead(ReadableByteChannel, ByteBuffer)}
* and {@link #channelWrite(WritableByteChannel, ByteBuffer)}. Only
* one of readCh or writeCh should be non-null.
*
* @see #channelRead(ReadableByteChannel, ByteBuffer)
* @see #channelWrite(WritableByteChannel, ByteBuffer)
*/
private static int channelIO(ReadableByteChannel readCh,
WritableByteChannel writeCh,
ByteBuffer buf) throws IOException {
int originalLimit = buf.limit();
int initialRemaining = buf.remaining();
int ret = 0;
while (buf.remaining() > 0) {
try {
int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
buf.limit(buf.position() + ioSize);
ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf);
if (ret < ioSize) {
break;
}
} finally {
buf.limit(originalLimit);
}
}
int nBytes = initialRemaining - buf.remaining();
return (nBytes > 0) ? nBytes : ret;
}
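  // Illustrative sketch (not part of the original source): with
  // NIO_BUFFER_LIMIT at 8 KB, a 20 KB buffer is moved as chunks of
  // 8 KB, 8 KB, and 4 KB; a short read/write ends the loop early and the
  // bytes transferred so far are returned.
  //
  //   ByteBuffer buf = ByteBuffer.allocate(20 * 1024);
  //   // ... fill buf, then:
  //   int written = channelIO(null, writableChannel, buf);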
private class ConnectionManager {
final private AtomicInteger count = new AtomicInteger();
final private Set<Connection> connections;
final private Timer idleScanTimer;
final private int idleScanThreshold;
final private int idleScanInterval;
final private int maxIdleTime;
final private int maxIdleToClose;
final private int maxConnections;
ConnectionManager() {
this.idleScanTimer = new Timer(
"IPC Server idle connection scanner for port " + getPort(), true);
this.idleScanThreshold = conf.getInt(
CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY,
CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_DEFAULT);
this.idleScanInterval = conf.getInt(
CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY,
CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_DEFAULT);
this.maxIdleTime = 2 * conf.getInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT);
this.maxIdleToClose = conf.getInt(
CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY,
CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_DEFAULT);
this.maxConnections = conf.getInt(
CommonConfigurationKeysPublic.IPC_SERVER_MAX_CONNECTIONS_KEY,
CommonConfigurationKeysPublic.IPC_SERVER_MAX_CONNECTIONS_DEFAULT);
// create a set with concurrency -and- a thread-safe iterator, add 2
// for listener and idle closer threads
this.connections = Collections.newSetFromMap(
new ConcurrentHashMap<Connection,Boolean>(
maxQueueSize, 0.75f, readThreads+2));
}
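    // Illustrative sketch (not part of the original source): the idle-scan
    // knobs read above map to these keys; the values are assumptions chosen
    // only for illustration.
    //
    //   conf.setInt("ipc.client.idlethreshold", 4000);
    //   conf.setInt("ipc.client.connection.maxidletime", 10000); // doubled above
    //   conf.setInt("ipc.server.max.connections", 0);  // <= 0 disables the cap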
private boolean add(Connection connection) {
boolean added = connections.add(connection);
if (added) {
count.getAndIncrement();
}
return added;
}
private boolean remove(Connection connection) {
boolean removed = connections.remove(connection);
if (removed) {
count.getAndDecrement();
}
return removed;
}
int size() {
return count.get();
}
boolean isFull() {
// The check is disabled when maxConnections <= 0.
return ((maxConnections > 0) && (size() >= maxConnections));
}
Connection[] toArray() {
return connections.toArray(new Connection[0]);
}
Connection register(SocketChannel channel) {
if (isFull()) {
return null;
}
Connection connection = new Connection(channel, Time.now());
add(connection);
if (LOG.isDebugEnabled()) {
LOG.debug("Server connection from " + connection +
"; # active connections: " + size() +
"; # queued calls: " + callQueue.size());
}
return connection;
}
boolean close(Connection connection) {
boolean exists = remove(connection);
if (exists) {
if (LOG.isDebugEnabled()) {
LOG.debug(Thread.currentThread().getName() +
": disconnecting client " + connection +
". Number of active connections: "+ size());
}
// only close if actually removed to avoid double-closing due
// to possible races
connection.close();
}
return exists;
}
// synch'ed to avoid explicit invocation upon OOM from colliding with
// timer task firing
synchronized void closeIdle(boolean scanAll) {
long minLastContact = Time.now() - maxIdleTime;
// concurrent iterator might miss new connections added
// during the iteration, but that's ok because they won't
// be idle yet anyway and will be caught on next scan
int closed = 0;
for (Connection connection : connections) {
// stop if connections dropped below threshold unless scanning all
if (!scanAll && size() < idleScanThreshold) {
break;
}
// stop if not scanning all and max connections are closed
if (connection.isIdle() &&
connection.getLastContact() < minLastContact &&
close(connection) &&
!scanAll && (++closed == maxIdleToClose)) {
break;
}
}
}
void closeAll() {
// use a copy of the connections to be absolutely sure the concurrent
// iterator doesn't miss a connection
for (Connection connection : toArray()) {
close(connection);
}
}
void startIdleScan() {
scheduleIdleScanTask();
}
void stopIdleScan() {
idleScanTimer.cancel();
}
private void scheduleIdleScanTask() {
if (!running) {
return;
}
TimerTask idleScanTask = new TimerTask(){
@Override
public void run() {
if (!running) {
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug(Thread.currentThread().getName()+": task running");
}
try {
closeIdle(false);
} finally {
// explicitly reschedule so next execution occurs relative
// to the end of this scan, not the beginning
scheduleIdleScanTask();
}
}
};
idleScanTimer.schedule(idleScanTask, idleScanInterval);
}
}
}
| 106,429 | 36.40949 | 105 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/GenericRefreshProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol which is used to refresh arbitrary things at runtime.
*/
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface GenericRefreshProtocol {
/**
* Version 1: Initial version.
*/
public static final long versionID = 1L;
/**
* Refresh the resource based on identity passed in.
* @throws IOException
*/
@Idempotent
Collection<RefreshResponse> refresh(String identifier, String[] args)
throws IOException;
}
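// Illustrative sketch (not part of the original source): a server-side
// handler registered for an identifier, assuming the RefreshRegistry and
// RefreshHandler companions in this package. The identifier "myCounters"
// is a hypothetical example.
//
//   RefreshRegistry.defaultRegistry().register("myCounters",
//       new RefreshHandler() {
//         @Override
//         public RefreshResponse handleRefresh(String identifier,
//             String[] args) {
//           return RefreshResponse.successResponse();
//         }
//       });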
| 1,729 | 33.6 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/UserIdentityProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import org.apache.hadoop.security.UserGroupInformation;
/**
 * The UserIdentityProvider uses the username as the
* identity. All jobs launched by a user will be grouped together.
*/
public class UserIdentityProvider implements IdentityProvider {
public String makeIdentity(Schedulable obj) {
UserGroupInformation ugi = obj.getUserGroupInformation();
if (ugi == null) {
return null;
}
return ugi.getUserName();
}
}
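// Illustrative sketch (not part of the original source): wiring this
// provider into a fair call queue. The port-qualified keys are assumptions
// for illustration, using port 8020 as a hypothetical example.
//
//   conf.set("ipc.8020.callqueue.impl",
//       "org.apache.hadoop.ipc.FairCallQueue");
//   conf.set("ipc.8020.identity-provider.impl",
//       "org.apache.hadoop.ipc.UserIdentityProvider");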
| 1,291 | 34.888889 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/RefreshCallQueueProtocolServerSideTranslatorPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc.protocolPB;
import java.io.IOException;
import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
import org.apache.hadoop.ipc.proto.RefreshCallQueueProtocolProtos.RefreshCallQueueRequestProto;
import org.apache.hadoop.ipc.proto.RefreshCallQueueProtocolProtos.RefreshCallQueueResponseProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class RefreshCallQueueProtocolServerSideTranslatorPB implements
RefreshCallQueueProtocolPB {
private final RefreshCallQueueProtocol impl;
private final static RefreshCallQueueResponseProto
VOID_REFRESH_CALL_QUEUE_RESPONSE = RefreshCallQueueResponseProto
.newBuilder().build();
public RefreshCallQueueProtocolServerSideTranslatorPB(
RefreshCallQueueProtocol impl) {
this.impl = impl;
}
@Override
public RefreshCallQueueResponseProto refreshCallQueue(
RpcController controller, RefreshCallQueueRequestProto request)
throws ServiceException {
try {
impl.refreshCallQueue();
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REFRESH_CALL_QUEUE_RESPONSE;
}
}
| 1,979 | 34.357143 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolServerSideTranslatorPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc.protocolPB;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.ipc.GenericRefreshProtocol;
import org.apache.hadoop.ipc.RefreshResponse;
import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshRequestProto;
import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseProto;
import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseCollectionProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class GenericRefreshProtocolServerSideTranslatorPB implements
GenericRefreshProtocolPB {
private final GenericRefreshProtocol impl;
public GenericRefreshProtocolServerSideTranslatorPB(
GenericRefreshProtocol impl) {
this.impl = impl;
}
@Override
public GenericRefreshResponseCollectionProto refresh(
RpcController controller, GenericRefreshRequestProto request)
throws ServiceException {
try {
List<String> argList = request.getArgsList();
String[] args = argList.toArray(new String[argList.size()]);
if (!request.hasIdentifier()) {
throw new ServiceException("Request must contain identifier");
}
Collection<RefreshResponse> results = impl.refresh(request.getIdentifier(), args);
return pack(results);
} catch (IOException e) {
throw new ServiceException(e);
}
}
// Convert a collection of RefreshResponse objects to a
// RefreshResponseCollection proto
private GenericRefreshResponseCollectionProto pack(
Collection<RefreshResponse> responses) {
GenericRefreshResponseCollectionProto.Builder b =
GenericRefreshResponseCollectionProto.newBuilder();
for (RefreshResponse response : responses) {
GenericRefreshResponseProto.Builder respBuilder =
GenericRefreshResponseProto.newBuilder();
respBuilder.setExitStatus(response.getReturnCode());
respBuilder.setUserMessage(response.getMessage());
respBuilder.setSenderName(response.getSenderName());
// Add to collection
b.addResponses(respBuilder);
}
return b.build();
}
}
| 3,021 | 34.552941 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolClientSideTranslatorPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RefreshResponse;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.GenericRefreshProtocol;
import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshRequestProto;
import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseProto;
import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseCollectionProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class GenericRefreshProtocolClientSideTranslatorPB implements
ProtocolMetaInterface, GenericRefreshProtocol, Closeable {
/** RpcController is not used and hence is set to null. */
private final static RpcController NULL_CONTROLLER = null;
private final GenericRefreshProtocolPB rpcProxy;
public GenericRefreshProtocolClientSideTranslatorPB(
GenericRefreshProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);
}
@Override
public Collection<RefreshResponse> refresh(String identifier, String[] args) throws IOException {
List<String> argList = Arrays.asList(args);
try {
GenericRefreshRequestProto request = GenericRefreshRequestProto.newBuilder()
.setIdentifier(identifier)
.addAllArgs(argList)
.build();
GenericRefreshResponseCollectionProto resp = rpcProxy.refresh(NULL_CONTROLLER, request);
return unpack(resp);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
}
private Collection<RefreshResponse> unpack(GenericRefreshResponseCollectionProto collection) {
List<GenericRefreshResponseProto> responseProtos = collection.getResponsesList();
List<RefreshResponse> responses = new ArrayList<RefreshResponse>();
for (GenericRefreshResponseProto rp : responseProtos) {
RefreshResponse response = unpack(rp);
responses.add(response);
}
return responses;
}
private RefreshResponse unpack(GenericRefreshResponseProto proto) {
// The default values
String message = null;
String sender = null;
int returnCode = -1;
// ... that can be overridden by data from the protobuf
if (proto.hasUserMessage()) {
message = proto.getUserMessage();
}
if (proto.hasExitStatus()) {
returnCode = proto.getExitStatus();
}
if (proto.hasSenderName()) {
sender = proto.getSenderName();
}
// ... and put into a RefreshResponse
RefreshResponse response = new RefreshResponse(returnCode, message);
response.setSenderName(sender);
return response;
}
@Override
public boolean isMethodSupported(String methodName) throws IOException {
return RpcClientUtil.isMethodSupported(rpcProxy,
GenericRefreshProtocolPB.class,
RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(GenericRefreshProtocolPB.class),
methodName);
}
}
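// Usage sketch (illustrative, not part of the original source): given a
// translator wrapping an already-created GenericRefreshProtocolPB proxy, a
// client can trigger a refresh and inspect each per-handler response. The
// identifier and argument values below are placeholders.
class GenericRefreshClientExample {
  static void refreshAndReport(
      GenericRefreshProtocolClientSideTranslatorPB translator)
      throws IOException {
    Collection<RefreshResponse> responses =
        translator.refresh("myRefreshIdentifier", new String[] {"-verbose"});
    for (RefreshResponse response : responses) {
      System.out.println(response.getSenderName() + " exited with "
          + response.getReturnCode() + ": " + response.getMessage());
    }
  }
}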
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/RefreshCallQueueProtocolClientSideTranslatorPB.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
import org.apache.hadoop.ipc.proto.RefreshCallQueueProtocolProtos.RefreshCallQueueRequestProto;
import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolPB;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class RefreshCallQueueProtocolClientSideTranslatorPB implements
ProtocolMetaInterface, RefreshCallQueueProtocol, Closeable {
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final RefreshCallQueueProtocolPB rpcProxy;
private final static RefreshCallQueueRequestProto
VOID_REFRESH_CALL_QUEUE_REQUEST =
RefreshCallQueueRequestProto.newBuilder().build();
public RefreshCallQueueProtocolClientSideTranslatorPB(
RefreshCallQueueProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);
}
@Override
public void refreshCallQueue() throws IOException {
try {
rpcProxy.refreshCallQueue(NULL_CONTROLLER,
VOID_REFRESH_CALL_QUEUE_REQUEST);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
}
@Override
public boolean isMethodSupported(String methodName) throws IOException {
return RpcClientUtil.isMethodSupported(rpcProxy,
RefreshCallQueueProtocolPB.class,
RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(RefreshCallQueueProtocolPB.class),
methodName);
}
}
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolPB.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshProtocolService;
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@ProtocolInfo(
protocolName = "org.apache.hadoop.ipc.GenericRefreshProtocol",
protocolVersion = 1)
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Evolving
public interface GenericRefreshProtocolPB extends
GenericRefreshProtocolService.BlockingInterface {
}
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/RefreshCallQueueProtocolPB.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.ipc.proto.RefreshCallQueueProtocolProtos.RefreshCallQueueProtocolService;
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@ProtocolInfo(
protocolName = "org.apache.hadoop.ipc.RefreshCallQueueProtocol",
protocolVersion = 1)
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Evolving
public interface RefreshCallQueueProtocolPB extends
RefreshCallQueueProtocolService.BlockingInterface {
}
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/package-info.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* RPC related metrics.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
package org.apache.hadoop.ipc.metrics;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc.metrics;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRates;
/**
* This class is for maintaining RPC method related statistics
* and publishing them through the metrics interfaces.
*/
@InterfaceAudience.Private
@Metrics(about="Per method RPC metrics", context="rpcdetailed")
public class RpcDetailedMetrics {
@Metric MutableRates rates;
static final Log LOG = LogFactory.getLog(RpcDetailedMetrics.class);
final MetricsRegistry registry;
final String name;
RpcDetailedMetrics(int port) {
name = "RpcDetailedActivityForPort"+ port;
registry = new MetricsRegistry("rpcdetailed")
.tag("port", "RPC port", String.valueOf(port));
LOG.debug(registry.info());
}
public String name() { return name; }
public static RpcDetailedMetrics create(int port) {
RpcDetailedMetrics m = new RpcDetailedMetrics(port);
return DefaultMetricsSystem.instance().register(m.name, null, m);
}
/**
* Initialize the metrics for JMX with protocol methods
* @param protocol the protocol class
*/
public void init(Class<?> protocol) {
rates.init(protocol);
}
/**
* Add an RPC processing time sample
* @param name of the RPC call
* @param processingTime the processing time
*/
//@Override // some instrumentation interface
public void addProcessingTime(String name, int processingTime) {
rates.add(name, processingTime);
}
/**
* Shutdown the instrumentation for the process
*/
//@Override // some instrumentation interface
public void shutdown() {}
}
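// Usage sketch (illustrative, not part of the original source): register
// per-method metrics for an RPC server on a given port. The protocol class,
// method name, and sample value below are placeholders.
class RpcDetailedMetricsExample {
  static RpcDetailedMetrics setUp(int port, Class<?> protocol) {
    RpcDetailedMetrics metrics = RpcDetailedMetrics.create(port);
    // Creates one mutable rate per method on the protocol interface.
    metrics.init(protocol);
    // Record a hypothetical 12 ms processing-time sample for "getBlocks".
    metrics.addProcessingTime("getBlocks", 12);
    return metrics;
  }
}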
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc.metrics;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterInt;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.lib.MutableRate;
/**
* This class is for maintaining the various RPC statistics
* and publishing them through the metrics interfaces.
*/
@InterfaceAudience.Private
@Metrics(about="Aggregate RPC metrics", context="rpc")
public class RpcMetrics {
static final Log LOG = LogFactory.getLog(RpcMetrics.class);
final Server server;
final MetricsRegistry registry;
final String name;
final boolean rpcQuantileEnable;
RpcMetrics(Server server, Configuration conf) {
String port = String.valueOf(server.getListenerAddress().getPort());
name = "RpcActivityForPort" + port;
this.server = server;
registry = new MetricsRegistry("rpc").tag("port", "RPC port", port);
int[] intervals = conf.getInts(
CommonConfigurationKeys.RPC_METRICS_PERCENTILES_INTERVALS_KEY);
rpcQuantileEnable = (intervals.length > 0) && conf.getBoolean(
CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE,
CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE_DEFAULT);
if (rpcQuantileEnable) {
rpcQueueTimeMillisQuantiles =
new MutableQuantiles[intervals.length];
rpcProcessingTimeMillisQuantiles =
new MutableQuantiles[intervals.length];
for (int i = 0; i < intervals.length; i++) {
int interval = intervals[i];
        rpcQueueTimeMillisQuantiles[i] = registry.newQuantiles("rpcQueueTime"
            + interval + "s", "rpc queue time in milliseconds", "ops",
            "latency", interval);
        rpcProcessingTimeMillisQuantiles[i] = registry.newQuantiles(
            "rpcProcessingTime" + interval + "s",
            "rpc processing time in milliseconds", "ops", "latency", interval);
}
}
LOG.debug("Initialized " + registry);
}
public String name() { return name; }
public static RpcMetrics create(Server server, Configuration conf) {
RpcMetrics m = new RpcMetrics(server, conf);
return DefaultMetricsSystem.instance().register(m.name, null, m);
}
@Metric("Number of received bytes") MutableCounterLong receivedBytes;
@Metric("Number of sent bytes") MutableCounterLong sentBytes;
@Metric("Queue time") MutableRate rpcQueueTime;
MutableQuantiles[] rpcQueueTimeMillisQuantiles;
@Metric("Processsing time") MutableRate rpcProcessingTime;
MutableQuantiles[] rpcProcessingTimeMillisQuantiles;
@Metric("Number of authentication failures")
MutableCounterLong rpcAuthenticationFailures;
@Metric("Number of authentication successes")
MutableCounterLong rpcAuthenticationSuccesses;
@Metric("Number of authorization failures")
MutableCounterLong rpcAuthorizationFailures;
@Metric("Number of authorization sucesses")
MutableCounterLong rpcAuthorizationSuccesses;
@Metric("Number of client backoff requests")
MutableCounterLong rpcClientBackoff;
@Metric("Number of open connections") public int numOpenConnections() {
return server.getNumOpenConnections();
}
@Metric("Length of the call queue") public int callQueueLength() {
return server.getCallQueueLen();
}
// Public instrumentation methods that could be extracted to an
// abstract class if we decide to do custom instrumentation classes a la
  // JobTrackerInstrumentation. The methods with a //@Override comment are
  // candidates for abstract methods in an abstract instrumentation class.
/**
* One authentication failure event
*/
//@Override
public void incrAuthenticationFailures() {
rpcAuthenticationFailures.incr();
}
/**
* One authentication success event
*/
//@Override
public void incrAuthenticationSuccesses() {
rpcAuthenticationSuccesses.incr();
}
/**
* One authorization success event
*/
//@Override
public void incrAuthorizationSuccesses() {
rpcAuthorizationSuccesses.incr();
}
/**
* One authorization failure event
*/
//@Override
public void incrAuthorizationFailures() {
rpcAuthorizationFailures.incr();
}
/**
* Shutdown the instrumentation for the process
*/
//@Override
public void shutdown() {}
/**
* Increment sent bytes by count
* @param count to increment
*/
//@Override
public void incrSentBytes(int count) {
sentBytes.incr(count);
}
/**
* Increment received bytes by count
* @param count to increment
*/
//@Override
public void incrReceivedBytes(int count) {
receivedBytes.incr(count);
}
/**
* Add an RPC queue time sample
* @param qTime the queue time
*/
//@Override
public void addRpcQueueTime(int qTime) {
rpcQueueTime.add(qTime);
if (rpcQuantileEnable) {
for (MutableQuantiles q : rpcQueueTimeMillisQuantiles) {
q.add(qTime);
}
}
}
/**
* Add an RPC processing time sample
* @param processingTime the processing time
*/
//@Override
public void addRpcProcessingTime(int processingTime) {
rpcProcessingTime.add(processingTime);
if (rpcQuantileEnable) {
for (MutableQuantiles q : rpcProcessingTimeMillisQuantiles) {
q.add(processingTime);
}
}
}
/**
* One client backoff event
*/
//@Override
public void incrClientBackoff() {
rpcClientBackoff.incr();
}
}
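// Configuration sketch (illustrative, not part of the original source):
// turning on the latency quantiles handled in the constructor above. The
// 60s/300s rolling windows are example values.
class RpcMetricsExample {
  static RpcMetrics createWithQuantiles(Server server, Configuration conf) {
    conf.setBoolean(CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE, true);
    conf.set(CommonConfigurationKeys.RPC_METRICS_PERCENTILES_INTERVALS_KEY,
        "60,300"); // window lengths, in seconds, for percentile snapshots
    return RpcMetrics.create(server, conf);
  }
}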
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RetryCacheMetrics.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc.metrics;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RetryCache;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
/**
* This class is for maintaining the various RetryCache-related statistics
* and publishing them through the metrics interfaces.
*/
@InterfaceAudience.Private
@Metrics(about="Aggregate RetryCache metrics", context="rpc")
public class RetryCacheMetrics {
static final Log LOG = LogFactory.getLog(RetryCacheMetrics.class);
final MetricsRegistry registry;
final String name;
RetryCacheMetrics(RetryCache retryCache) {
name = "RetryCache."+ retryCache.getCacheName();
registry = new MetricsRegistry(name);
if (LOG.isDebugEnabled()) {
LOG.debug("Initialized "+ registry);
}
}
public String getName() { return name; }
public static RetryCacheMetrics create(RetryCache cache) {
RetryCacheMetrics m = new RetryCacheMetrics(cache);
return DefaultMetricsSystem.instance().register(m.name, null, m);
}
@Metric("Number of RetryCache hit") MutableCounterLong cacheHit;
@Metric("Number of RetryCache cleared") MutableCounterLong cacheCleared;
@Metric("Number of RetryCache updated") MutableCounterLong cacheUpdated;
/**
* One cache hit event
*/
public void incrCacheHit() {
cacheHit.incr();
}
/**
* One cache cleared
*/
public void incrCacheCleared() {
cacheCleared.incr();
}
/**
* One cache updated
*/
public void incrCacheUpdated() {
cacheUpdated.incr();
}
public long getCacheHit() {
return cacheHit.value();
}
public long getCacheCleared() {
return cacheCleared.value();
}
public long getCacheUpdated() {
return cacheUpdated.value();
}
}
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.io.Charsets;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
/**
* <p>
* Simple {@link DNSToSwitchMapping} implementation that reads a 2 column text
* file. The columns are separated by whitespace. The first column is a DNS or
* IP address and the second column specifies the rack where the address maps.
* </p>
* <p>
* This class uses the configuration parameter {@code
* net.topology.table.file.name} to locate the mapping file.
* </p>
* <p>
* Calls to {@link #resolve(List)} will look up the address as defined in the
* mapping file. If no entry corresponding to the address is found, the value
* {@code /default-rack} is returned.
* </p>
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class TableMapping extends CachedDNSToSwitchMapping {
private static final Log LOG = LogFactory.getLog(TableMapping.class);
public TableMapping() {
super(new RawTableMapping());
}
private RawTableMapping getRawMapping() {
return (RawTableMapping) rawMapping;
}
@Override
public Configuration getConf() {
return getRawMapping().getConf();
}
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
getRawMapping().setConf(conf);
}
@Override
public void reloadCachedMappings() {
super.reloadCachedMappings();
getRawMapping().reloadCachedMappings();
}
private static final class RawTableMapping extends Configured
implements DNSToSwitchMapping {
private Map<String, String> map;
private Map<String, String> load() {
Map<String, String> loadMap = new HashMap<String, String>();
String filename = getConf().get(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, null);
if (StringUtils.isBlank(filename)) {
LOG.warn(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY + " not configured. ");
return null;
}
try (BufferedReader reader =
new BufferedReader(new InputStreamReader(
new FileInputStream(filename), Charsets.UTF_8))) {
String line = reader.readLine();
while (line != null) {
line = line.trim();
if (line.length() != 0 && line.charAt(0) != '#') {
String[] columns = line.split("\\s+");
if (columns.length == 2) {
loadMap.put(columns[0], columns[1]);
} else {
LOG.warn("Line does not have two columns. Ignoring. " + line);
}
}
line = reader.readLine();
}
} catch (Exception e) {
LOG.warn(filename + " cannot be read.", e);
return null;
}
return loadMap;
}
@Override
public synchronized List<String> resolve(List<String> names) {
if (map == null) {
map = load();
if (map == null) {
LOG.warn("Failed to read topology table. " +
NetworkTopology.DEFAULT_RACK + " will be used for all nodes.");
map = new HashMap<String, String>();
}
}
List<String> results = new ArrayList<String>(names.size());
for (String name : names) {
String result = map.get(name);
if (result != null) {
results.add(result);
} else {
results.add(NetworkTopology.DEFAULT_RACK);
}
}
return results;
}
@Override
public void reloadCachedMappings() {
Map<String, String> newMap = load();
if (newMap == null) {
LOG.error("Failed to reload the topology table. The cached " +
"mappings will not be cleared.");
} else {
synchronized(this) {
map = newMap;
}
}
}
@Override
public void reloadCachedMappings(List<String> names) {
// TableMapping has to reload all mappings at once, so no chance to
// reload mappings on specific nodes
reloadCachedMappings();
}
}
}
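// Usage sketch (illustrative, not part of the original source): with a
// hypothetical mapping file /tmp/topology.table containing lines such as
//   192.168.1.10 /rack1
//   host2.example.com /rack2
// the mapping resolves known hosts to their racks and everything else to
// NetworkTopology.DEFAULT_RACK.
class TableMappingExample {
  static List<String> resolveRacks(Configuration conf) {
    conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, "/tmp/topology.table");
    TableMapping mapping = new TableMapping();
    mapping.setConf(conf);
    return mapping.resolve(
        java.util.Arrays.asList("192.168.1.10", "unknown-host"));
  }
}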
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMappingWithDependency.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* An interface that must be implemented to allow pluggable
* DNS-name/IP-address to RackID resolvers.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface DNSToSwitchMappingWithDependency extends DNSToSwitchMapping {
/**
* Get a list of dependent DNS-names for a given DNS-name/IP-address.
* Dependent DNS-names fall into the same fault domain which must be
* taken into account when placing replicas. This is intended to be used for
* cross node group dependencies when node groups are not sufficient to
* distinguish data nodes by fault domains. In practice, this is needed when
   * a compute server runs VMs which use shared storage (as opposed to
* directly attached storage). In this case data nodes fall in two different
* fault domains. One fault domain is defined by a compute server and
* the other is defined by storage. With node groups we can group data nodes
   * either by server fault domain or by storage fault domain. However, one of
   * the fault domains cannot be handled, and that is where we need to define
   * cross node group dependencies. These dependencies are applied in block
   * placement policies which ensure that no two replicas will be on two
   * dependent nodes.
* @param name - host name or IP address of a data node. Input host name
* parameter must take a value of dfs.datanode.hostname config value if this
* config property is set. Otherwise FQDN of the data node is used.
* @return list of dependent host names. If dfs.datanode.hostname config
* property is set, then its value must be returned.
* Otherwise, FQDN is returned.
*/
public List<String> getDependency(String name);
}
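// Minimal sketch (illustrative, not part of the original source) of an
// implementation where two hypothetical VMs, "vm-a" and "vm-b", share one
// storage device and therefore report each other as dependencies. A real
// plugin would resolve racks from actual topology data.
class SharedStorageMappingExample implements DNSToSwitchMappingWithDependency {
  @Override
  public List<String> getDependency(String name) {
    if ("vm-a".equals(name)) {
      return java.util.Collections.singletonList("vm-b");
    }
    if ("vm-b".equals(name)) {
      return java.util.Collections.singletonList("vm-a");
    }
    return java.util.Collections.emptyList();
  }
  @Override
  public List<String> resolve(List<String> names) {
    // Place every host in the default rack for this sketch.
    List<String> racks = new java.util.ArrayList<String>(names.size());
    for (int i = 0; i < names.size(); i++) {
      racks.add(NetworkTopology.DEFAULT_RACK);
    }
    return racks;
  }
  @Override
  public void reloadCachedMappings() {
    // Nothing is cached in this sketch.
  }
  @Override
  public void reloadCachedMappings(List<String> names) {
    // Nothing is cached in this sketch.
  }
}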
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/Node.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** The interface defines a node in a network topology.
 * A node may be a leaf representing a data node or an inner
 * node representing a datacenter or rack.
 * Each node has a name, and its location in the network is
* decided by a string with syntax similar to a file name.
* For example, a data node's name is hostname:port# and if it's located at
* rack "orange" in datacenter "dog", the string representation of its
* network location is /dog/orange
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public interface Node {
/** @return the string representation of this node's network location */
public String getNetworkLocation();
/** Set this node's network location
* @param location the location
*/
public void setNetworkLocation(String location);
/** @return this node's name */
public String getName();
/** @return this node's parent */
public Node getParent();
/** Set this node's parent
* @param parent the parent
*/
public void setParent(Node parent);
/** @return this node's level in the tree.
* E.g. the root of a tree returns 0 and its children return 1
*/
public int getLevel();
/** Set this node's level in the tree
* @param i the level
*/
public void setLevel(int i);
}
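// Minimal sketch (illustrative, not part of the original source) of a leaf
// node; Hadoop's own NodeBase in this package is the real implementation.
// The name and location values are placeholders.
class SimpleLeafNodeExample implements Node {
  private String name = "host1:50010";     // data node name, hostname:port#
  private String location = "/dog/orange"; // rack "orange" in datacenter "dog"
  private Node parent;
  private int level;
  @Override public String getNetworkLocation() { return location; }
  @Override public void setNetworkLocation(String location) {
    this.location = location;
  }
  @Override public String getName() { return name; }
  @Override public Node getParent() { return parent; }
  @Override public void setParent(Node parent) { this.parent = parent; }
  @Override public int getLevel() { return level; }
  @Override public void setLevel(int i) { this.level = i; }
}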
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Proxy;
import java.net.Socket;
import java.net.UnknownHostException;
import javax.net.SocketFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
/**
* Specialized SocketFactory to create sockets with a SOCKS proxy
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class SocksSocketFactory extends SocketFactory implements
Configurable {
private Configuration conf;
private Proxy proxy;
/**
* Default empty constructor (for use with the reflection API).
*/
public SocksSocketFactory() {
this.proxy = Proxy.NO_PROXY;
}
/**
* Constructor with a supplied Proxy
*
* @param proxy the proxy to use to create sockets
*/
public SocksSocketFactory(Proxy proxy) {
this.proxy = proxy;
}
@Override
public Socket createSocket() throws IOException {
return new Socket(proxy);
}
@Override
public Socket createSocket(InetAddress addr, int port) throws IOException {
Socket socket = createSocket();
socket.connect(new InetSocketAddress(addr, port));
return socket;
}
@Override
public Socket createSocket(InetAddress addr, int port,
InetAddress localHostAddr, int localPort) throws IOException {
Socket socket = createSocket();
socket.bind(new InetSocketAddress(localHostAddr, localPort));
socket.connect(new InetSocketAddress(addr, port));
return socket;
}
@Override
public Socket createSocket(String host, int port) throws IOException,
UnknownHostException {
Socket socket = createSocket();
socket.connect(new InetSocketAddress(host, port));
return socket;
}
@Override
public Socket createSocket(String host, int port,
InetAddress localHostAddr, int localPort) throws IOException,
UnknownHostException {
Socket socket = createSocket();
socket.bind(new InetSocketAddress(localHostAddr, localPort));
socket.connect(new InetSocketAddress(host, port));
return socket;
}
@Override
public int hashCode() {
return proxy.hashCode();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (!(obj instanceof SocksSocketFactory))
return false;
final SocksSocketFactory other = (SocksSocketFactory) obj;
if (proxy == null) {
if (other.proxy != null)
return false;
} else if (!proxy.equals(other.proxy))
return false;
return true;
}
@Override
public Configuration getConf() {
return this.conf;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
String proxyStr = conf.get("hadoop.socks.server");
if ((proxyStr != null) && (proxyStr.length() > 0)) {
setProxy(proxyStr);
}
}
/**
* Set the proxy of this socket factory as described in the string
* parameter
*
* @param proxyStr the proxy address using the format "host:port"
*/
private void setProxy(String proxyStr) {
String[] strs = proxyStr.split(":", 2);
if (strs.length != 2)
throw new RuntimeException("Bad SOCKS proxy parameter: " + proxyStr);
String host = strs[0];
int port = Integer.parseInt(strs[1]);
this.proxy =
new Proxy(Proxy.Type.SOCKS, InetSocketAddress.createUnresolved(host,
port));
}
}
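// Usage sketch (illustrative, not part of the original source): route client
// connections through a SOCKS proxy. The proxy address
// "socks-gw.example.com:1080" is a placeholder; "hadoop.socks.server" is the
// key read by setConf() above.
class SocksSocketFactoryExample {
  static Socket open(String host, int port) throws IOException {
    Configuration conf = new Configuration();
    conf.set("hadoop.socks.server", "socks-gw.example.com:1080");
    SocksSocketFactory factory = new SocksSocketFactory();
    factory.setConf(conf);
    return factory.createSocket(host, port);
  }
}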
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Constructor;
import java.net.BindException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.net.NoRouteToHostException;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.UnknownHostException;
import java.net.ConnectException;
import java.nio.channels.SocketChannel;
import java.util.Map.Entry;
import java.util.regex.Pattern;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.net.util.SubnetUtils;
import org.apache.commons.net.util.SubnetUtils.SubnetInfo;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.base.Preconditions;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class NetUtils {
private static final Log LOG = LogFactory.getLog(NetUtils.class);
private static Map<String, String> hostToResolved =
new HashMap<String, String>();
/** text to point users elsewhere: {@value} */
private static final String FOR_MORE_DETAILS_SEE
= " For more details see: ";
/** text included in wrapped exceptions if the host is null: {@value} */
public static final String UNKNOWN_HOST = "(unknown)";
/** Base URL of the Hadoop Wiki: {@value} */
public static final String HADOOP_WIKI = "http://wiki.apache.org/hadoop/";
/**
* Get the socket factory for the given class according to its
* configuration parameter
* <tt>hadoop.rpc.socket.factory.class.<ClassName></tt>. When no
* such parameter exists then fall back on the default socket factory as
* configured by <tt>hadoop.rpc.socket.factory.class.default</tt>. If
* this default socket factory is not configured, then fall back on the JVM
* default socket factory.
*
* @param conf the configuration
* @param clazz the class (usually a {@link VersionedProtocol})
* @return a socket factory
*/
public static SocketFactory getSocketFactory(Configuration conf,
Class<?> clazz) {
SocketFactory factory = null;
String propValue =
conf.get("hadoop.rpc.socket.factory.class." + clazz.getSimpleName());
if ((propValue != null) && (propValue.length() > 0))
factory = getSocketFactoryFromProperty(conf, propValue);
if (factory == null)
factory = getDefaultSocketFactory(conf);
return factory;
}
/**
* Get the default socket factory as specified by the configuration
   * parameter <tt>hadoop.rpc.socket.factory.class.default</tt>
*
* @param conf the configuration
* @return the default socket factory as specified in the configuration or
* the JVM default socket factory if the configuration does not
* contain a default socket factory property.
*/
public static SocketFactory getDefaultSocketFactory(Configuration conf) {
String propValue = conf.get(
CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_DEFAULT);
if ((propValue == null) || (propValue.length() == 0))
return SocketFactory.getDefault();
return getSocketFactoryFromProperty(conf, propValue);
}
/**
   * Get the socket factory whose class name is given by the property value.
   * The named class is loaded from the configuration's classpath and
   * instantiated via reflection; a RuntimeException is raised if the class
   * cannot be found.
*
* @param propValue the property which is the class name of the
* SocketFactory to instantiate; assumed non null and non empty.
* @return a socket factory as defined in the property value.
*/
public static SocketFactory getSocketFactoryFromProperty(
Configuration conf, String propValue) {
try {
Class<?> theClass = conf.getClassByName(propValue);
return (SocketFactory) ReflectionUtils.newInstance(theClass, conf);
} catch (ClassNotFoundException cnfe) {
throw new RuntimeException("Socket Factory class not found: " + cnfe);
}
}
/**
* Util method to build socket addr from either:
* <host>:<port>
* <fs>://<host>:<port>/<path>
*/
public static InetSocketAddress createSocketAddr(String target) {
return createSocketAddr(target, -1);
}
/**
* Util method to build socket addr from either:
* <host>
* <host>:<port>
* <fs>://<host>:<port>/<path>
*/
public static InetSocketAddress createSocketAddr(String target,
int defaultPort) {
return createSocketAddr(target, defaultPort, null);
}
/**
* Create an InetSocketAddress from the given target string and
* default port. If the string cannot be parsed correctly, the
* <code>configName</code> parameter is used as part of the
* exception message, allowing the user to better diagnose
* the misconfiguration.
*
* @param target a string of either "host" or "host:port"
* @param defaultPort the default port if <code>target</code> does not
* include a port number
* @param configName the name of the configuration from which
* <code>target</code> was loaded. This is used in the
* exception message in the case that parsing fails.
*/
public static InetSocketAddress createSocketAddr(String target,
int defaultPort,
String configName) {
String helpText = "";
if (configName != null) {
helpText = " (configuration property '" + configName + "')";
}
if (target == null) {
throw new IllegalArgumentException("Target address cannot be null." +
helpText);
}
target = target.trim();
boolean hasScheme = target.contains("://");
URI uri = null;
try {
uri = hasScheme ? URI.create(target) : URI.create("dummyscheme://"+target);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(
"Does not contain a valid host:port authority: " + target + helpText
);
}
String host = uri.getHost();
int port = uri.getPort();
if (port == -1) {
port = defaultPort;
}
String path = uri.getPath();
if ((host == null) || (port < 0) ||
(!hasScheme && path != null && !path.isEmpty()))
{
throw new IllegalArgumentException(
"Does not contain a valid host:port authority: " + target + helpText
);
}
return createSocketAddrForHost(host, port);
}
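  // Illustrative sketch (not part of the original source): the accepted
  // target forms. Both calls below describe the placeholder host
  // "namenode.example.com" on port 8020; "fs.defaultFS" is passed only to
  // improve the error message should parsing fail.
  private static void createSocketAddrExample() {
    InetSocketAddress plain = createSocketAddr("namenode.example.com:8020");
    InetSocketAddress fromUri = createSocketAddr(
        "hdfs://namenode.example.com:8020/some/path", 8020, "fs.defaultFS");
  }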
/**
* Create a socket address with the given host and port. The hostname
* might be replaced with another host that was set via
* {@link #addStaticResolution(String, String)}. The value of
* hadoop.security.token.service.use_ip will determine whether the
* standard java host resolver is used, or if the fully qualified resolver
* is used.
* @param host the hostname or IP use to instantiate the object
* @param port the port number
* @return InetSocketAddress
*/
public static InetSocketAddress createSocketAddrForHost(String host, int port) {
String staticHost = getStaticResolution(host);
String resolveHost = (staticHost != null) ? staticHost : host;
InetSocketAddress addr;
try {
InetAddress iaddr = SecurityUtil.getByName(resolveHost);
// if there is a static entry for the host, make the returned
// address look like the original given host
if (staticHost != null) {
iaddr = InetAddress.getByAddress(host, iaddr.getAddress());
}
addr = new InetSocketAddress(iaddr, port);
} catch (UnknownHostException e) {
addr = InetSocketAddress.createUnresolved(host, port);
}
return addr;
}
/**
* Resolve the uri's hostname and add the default port if not in the uri
* @param uri to resolve
* @param defaultPort if none is given
* @return URI
*/
public static URI getCanonicalUri(URI uri, int defaultPort) {
// skip if there is no authority, ie. "file" scheme or relative uri
String host = uri.getHost();
if (host == null) {
return uri;
}
String fqHost = canonicalizeHost(host);
int port = uri.getPort();
// short out if already canonical with a port
if (host.equals(fqHost) && port != -1) {
return uri;
}
// reconstruct the uri with the canonical host and port
try {
uri = new URI(uri.getScheme(), uri.getUserInfo(),
fqHost, (port == -1) ? defaultPort : port,
uri.getPath(), uri.getQuery(), uri.getFragment());
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
return uri;
}
// cache the canonicalized hostnames; the cache currently isn't expired,
// but the canonicals will only change if the host's resolver configuration
// changes
private static final ConcurrentHashMap<String, String> canonicalizedHostCache =
new ConcurrentHashMap<String, String>();
private static String canonicalizeHost(String host) {
// check if the host has already been canonicalized
String fqHost = canonicalizedHostCache.get(host);
if (fqHost == null) {
try {
fqHost = SecurityUtil.getByName(host).getHostName();
// slight race condition, but won't hurt
canonicalizedHostCache.putIfAbsent(host, fqHost);
} catch (UnknownHostException e) {
fqHost = host;
}
}
return fqHost;
}
/**
* Adds a static resolution for host. This can be used for setting up
   * fake hostnames that point to a well known host. For example,
   * in some test cases we need to have daemons with different hostnames
* running on the same machine. In order to create connections to these
* daemons, one can set up mappings from those hostnames to "localhost".
* {@link NetUtils#getStaticResolution(String)} can be used to query for
* the actual hostname.
* @param host
* @param resolvedName
*/
public static void addStaticResolution(String host, String resolvedName) {
synchronized (hostToResolved) {
hostToResolved.put(host, resolvedName);
}
}
/**
* Retrieves the resolved name for the passed host. The resolved name must
* have been set earlier using
* {@link NetUtils#addStaticResolution(String, String)}
* @param host
* @return the resolution
*/
public static String getStaticResolution(String host) {
synchronized (hostToResolved) {
return hostToResolved.get(host);
}
}
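  // Illustrative sketch (not part of the original source): tests can point a
  // fake hostname at localhost so that connections to the placeholder name
  // "fake-dn-1" reach a locally running daemon.
  private static InetSocketAddress staticResolutionExample() {
    addStaticResolution("fake-dn-1", "localhost");
    // Resolves through "localhost" but still reports "fake-dn-1" as the host.
    return createSocketAddrForHost("fake-dn-1", 50010);
  }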
/**
* This is used to get all the resolutions that were added using
* {@link NetUtils#addStaticResolution(String, String)}. The return
* value is a List each element of which contains an array of String
* of the form String[0]=hostname, String[1]=resolved-hostname
* @return the list of resolutions
*/
public static List <String[]> getAllStaticResolutions() {
synchronized (hostToResolved) {
Set <Entry <String, String>>entries = hostToResolved.entrySet();
if (entries.size() == 0) {
return null;
}
List <String[]> l = new ArrayList<String[]>(entries.size());
for (Entry<String, String> e : entries) {
l.add(new String[] {e.getKey(), e.getValue()});
}
return l;
}
}
/**
* Returns InetSocketAddress that a client can use to
* connect to the server. Server.getListenerAddress() is not correct when
* the server binds to "0.0.0.0". This returns "hostname:port" of the server,
* or "127.0.0.1:port" when the getListenerAddress() returns "0.0.0.0:port".
*
* @param server
* @return socket address that a client can use to connect to the server.
*/
public static InetSocketAddress getConnectAddress(Server server) {
return getConnectAddress(server.getListenerAddress());
}
/**
* Returns an InetSocketAddress that a client can use to connect to the
* given listening address.
*
* @param addr of a listener
* @return socket address that a client can use to connect to the server.
*/
public static InetSocketAddress getConnectAddress(InetSocketAddress addr) {
if (!addr.isUnresolved() && addr.getAddress().isAnyLocalAddress()) {
try {
addr = new InetSocketAddress(InetAddress.getLocalHost(), addr.getPort());
} catch (UnknownHostException uhe) {
// shouldn't get here unless the host doesn't have a loopback iface
addr = createSocketAddrForHost("127.0.0.1", addr.getPort());
}
}
return addr;
}
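  // Illustrative sketch (not part of the original source): a wildcard
  // listener address is rewritten into one a client can actually dial.
  private static InetSocketAddress connectAddressExample() {
    InetSocketAddress listener = new InetSocketAddress("0.0.0.0", 8020);
    // Returns the local hostname (or 127.0.0.1) with the same port.
    return getConnectAddress(listener);
  }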
/**
* Same as <code>getInputStream(socket, socket.getSoTimeout()).</code>
* <br><br>
*
* @see #getInputStream(Socket, long)
*/
public static SocketInputWrapper getInputStream(Socket socket)
throws IOException {
return getInputStream(socket, socket.getSoTimeout());
}
/**
* Return a {@link SocketInputWrapper} for the socket and set the given
* timeout. If the socket does not have an associated channel, then its socket
* timeout will be set to the specified value. Otherwise, a
* {@link SocketInputStream} will be created which reads with the configured
* timeout.
*
   * Any socket created using socket factories returned by {@link NetUtils},
* must use this interface instead of {@link Socket#getInputStream()}.
*
* In general, this should be called only once on each socket: see the note
* in {@link SocketInputWrapper#setTimeout(long)} for more information.
*
* @see Socket#getChannel()
*
* @param socket
* @param timeout timeout in milliseconds. zero for waiting as
* long as necessary.
* @return SocketInputWrapper for reading from the socket.
* @throws IOException
*/
public static SocketInputWrapper getInputStream(Socket socket, long timeout)
throws IOException {
InputStream stm = (socket.getChannel() == null) ?
socket.getInputStream() : new SocketInputStream(socket);
SocketInputWrapper w = new SocketInputWrapper(socket, stm);
w.setTimeout(timeout);
return w;
}
/**
* Same as getOutputStream(socket, 0). Timeout of zero implies write will
* wait until data is available.<br><br>
*
* From documentation for {@link #getOutputStream(Socket, long)} : <br>
* Returns OutputStream for the socket. If the socket has an associated
* SocketChannel then it returns a
* {@link SocketOutputStream} with the given timeout. If the socket does not
   * have a channel, {@link Socket#getOutputStream()} is returned. In the latter
* case, the timeout argument is ignored and the write will wait until
* data is available.<br><br>
*
* Any socket created using socket factories returned by {@link NetUtils},
* must use this interface instead of {@link Socket#getOutputStream()}.
*
* @see #getOutputStream(Socket, long)
*
* @param socket
* @return OutputStream for writing to the socket.
* @throws IOException
*/
public static OutputStream getOutputStream(Socket socket)
throws IOException {
return getOutputStream(socket, 0);
}
/**
* Returns OutputStream for the socket. If the socket has an associated
* SocketChannel then it returns a
* {@link SocketOutputStream} with the given timeout. If the socket does not
   * have a channel, {@link Socket#getOutputStream()} is returned. In the latter
* case, the timeout argument is ignored and the write will wait until
* data is available.<br><br>
*
* Any socket created using socket factories returned by {@link NetUtils},
* must use this interface instead of {@link Socket#getOutputStream()}.
*
* @see Socket#getChannel()
*
* @param socket
* @param timeout timeout in milliseconds. This may not always apply. zero
* for waiting as long as necessary.
* @return OutputStream for writing to the socket.
* @throws IOException
*/
public static OutputStream getOutputStream(Socket socket, long timeout)
throws IOException {
return (socket.getChannel() == null) ?
socket.getOutputStream() : new SocketOutputStream(socket, timeout);
}
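  // Illustrative sketch (not part of the original source): reading and
  // writing through the wrappers above instead of the raw java.net streams,
  // with a 30 second timeout on both directions.
  private static void socketStreamExample(Socket socket) throws IOException {
    SocketInputWrapper in = getInputStream(socket, 30000);
    OutputStream out = getOutputStream(socket, 30000);
    out.write(0); // timeout is honored only for channel-backed sockets
    int firstByte = in.read();
  }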
/**
* This is a drop-in replacement for
* {@link Socket#connect(SocketAddress, int)}.
* In the case of normal sockets that don't have associated channels, this
* just invokes <code>socket.connect(endpoint, timeout)</code>. If
* <code>socket.getChannel()</code> returns a non-null channel,
* connect is implemented using Hadoop's selectors. This is done mainly
   * to prevent Sun's connect implementation from creating thread-local
   * selectors, since Hadoop has no control over when these are closed
* and could end up taking all the available file descriptors.
*
* @see java.net.Socket#connect(java.net.SocketAddress, int)
*
* @param socket
* @param address the remote address
* @param timeout timeout in milliseconds
*/
public static void connect(Socket socket,
SocketAddress address,
int timeout) throws IOException {
connect(socket, address, null, timeout);
}
/**
* Like {@link NetUtils#connect(Socket, SocketAddress, int)} but
* also takes a local address and port to bind the socket to.
*
* @param socket
* @param endpoint the remote address
* @param localAddr the local address to bind the socket to
* @param timeout timeout in milliseconds
*/
public static void connect(Socket socket,
SocketAddress endpoint,
SocketAddress localAddr,
int timeout) throws IOException {
if (socket == null || endpoint == null || timeout < 0) {
throw new IllegalArgumentException("Illegal argument for connect()");
}
SocketChannel ch = socket.getChannel();
if (localAddr != null) {
      Class<?> localClass = localAddr.getClass();
      Class<?> remoteClass = endpoint.getClass();
Preconditions.checkArgument(localClass.equals(remoteClass),
"Local address %s must be of same family as remote address %s.",
localAddr, endpoint);
socket.bind(localAddr);
}
try {
if (ch == null) {
// let the default implementation handle it.
socket.connect(endpoint, timeout);
} else {
SocketIOWithTimeout.connect(ch, endpoint, timeout);
}
} catch (SocketTimeoutException ste) {
throw new ConnectTimeoutException(ste.getMessage());
}
// There is a very rare case allowed by the TCP specification, such that
// if we are trying to connect to an endpoint on the local machine,
// and we end up choosing an ephemeral port equal to the destination port,
// we will actually end up getting connected to ourself (ie any data we
// send just comes right back). This is only possible if the target
// daemon is down, so we'll treat it like connection refused.
if (socket.getLocalPort() == socket.getPort() &&
socket.getLocalAddress().equals(socket.getInetAddress())) {
LOG.info("Detected a loopback TCP socket, disconnecting it");
socket.close();
throw new ConnectException(
"Localhost targeted connection resulted in a loopback. " +
"No daemon is listening on the target port.");
}
}
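  // Illustrative sketch (not part of the original source): connect with an
  // explicit ephemeral local bind and a 10 second timeout. The remote host
  // name is a placeholder.
  private static void connectExample(Socket socket) throws IOException {
    SocketAddress remote = new InetSocketAddress("remote.example.com", 8020);
    SocketAddress local = new InetSocketAddress(0); // any-local, ephemeral port
    connect(socket, remote, local, 10000);
  }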
/**
* Given a string representation of a host, return its ip address
* in textual presentation.
*
* @param name a string representation of a host:
* either a textual representation its IP address or its host name
* @return its IP address in the string format
*/
public static String normalizeHostName(String name) {
try {
return InetAddress.getByName(name).getHostAddress();
} catch (UnknownHostException e) {
return name;
}
}
/**
* Given a collection of string representation of hosts, return a list of
* corresponding IP addresses in the textual representation.
*
* @param names a collection of string representations of hosts
* @return a list of corresponding IP addresses in the string format
* @see #normalizeHostName(String)
*/
public static List<String> normalizeHostNames(Collection<String> names) {
List<String> hostNames = new ArrayList<String>(names.size());
for (String name : names) {
hostNames.add(normalizeHostName(name));
}
return hostNames;
}
/**
* Performs a sanity check on the list of hostnames/IPs to verify they at least
* appear to be valid.
* @param names - List of hostnames/IPs
* @throws UnknownHostException
*/
public static void verifyHostnames(String[] names) throws UnknownHostException {
for (String name: names) {
if (name == null) {
throw new UnknownHostException("null hostname found");
}
// The first check supports URL formats (e.g. hdfs://, etc.).
// java.net.URI requires a schema, so we add a dummy one if it doesn't
// have one already.
URI uri = null;
try {
uri = new URI(name);
if (uri.getHost() == null) {
uri = new URI("http://" + name);
}
} catch (URISyntaxException e) {
uri = null;
}
if (uri == null || uri.getHost() == null) {
throw new UnknownHostException(name + " is not a valid Inet address");
}
}
}
private static final Pattern ipPortPattern = // Pattern for matching ip[:port]
Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d+)?");
/**
* Attempt to obtain the host name of the given string which contains
* an IP address and an optional port.
*
* @param ipPort string of form ip[:port]
* @return Host name or null if the name can not be determined
*/
public static String getHostNameOfIP(String ipPort) {
if (null == ipPort || !ipPortPattern.matcher(ipPort).matches()) {
return null;
}
try {
int colonIdx = ipPort.indexOf(':');
String ip = (-1 == colonIdx) ? ipPort
: ipPort.substring(0, ipPort.indexOf(':'));
return InetAddress.getByName(ip).getHostName();
} catch (UnknownHostException e) {
return null;
}
}
/**
* Return hostname without throwing exception.
* @return hostname
*/
public static String getHostname() {
try {return "" + InetAddress.getLocalHost();}
catch(UnknownHostException uhe) {return "" + uhe;}
}
/**
* Compose a "host:port" string from the address.
*/
public static String getHostPortString(InetSocketAddress addr) {
return addr.getHostName() + ":" + addr.getPort();
}
/**
* Checks if {@code host} is a local host name and return {@link InetAddress}
* corresponding to that address.
*
* @param host the specified host
* @return a valid local {@link InetAddress} or null
* @throws SocketException if an I/O error occurs
*/
public static InetAddress getLocalInetAddress(String host)
throws SocketException {
if (host == null) {
return null;
}
InetAddress addr = null;
try {
addr = SecurityUtil.getByName(host);
if (NetworkInterface.getByInetAddress(addr) == null) {
addr = null; // Not a local address
}
} catch (UnknownHostException ignore) { }
return addr;
}
/**
* Given an InetAddress, checks to see if the address is a local address, by
* comparing the address with all the interfaces on the node.
* @param addr address to check if it is local node's address
* @return true if the address corresponds to the local node
*/
public static boolean isLocalAddress(InetAddress addr) {
// Check if the address is any local or loop back
boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
// Check if the address is defined on any interface
if (!local) {
try {
local = NetworkInterface.getByInetAddress(addr) != null;
} catch (SocketException e) {
local = false;
}
}
return local;
}
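  // Illustrative sketch: wildcard and loopback addresses are local by
  // definition; any other address must be bound to an interface on this node.
  //
  //   NetUtils.isLocalAddress(InetAddress.getByName("127.0.0.1")); // -> true
  //   NetUtils.isLocalAddress(InetAddress.getByName("8.8.8.8"));   // -> false (typically)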
/**
   * Take an IOException, the local host/port and the remote host/port, and
   * return an IOException with the input exception as the cause that also
   * includes the host details. The new exception provides the stack trace of
   * the place where the exception is thrown and some extra diagnostics
   * information. If the exception is a BindException, ConnectException,
   * UnknownHostException, SocketTimeoutException, NoRouteToHostException or
   * EOFException, return a new one of the same type; otherwise return an
   * IOException.
*
* @param destHost target host (nullable)
* @param destPort target port
* @param localHost local host (nullable)
* @param localPort local port
* @param exception the caught exception.
* @return an exception to throw
*/
public static IOException wrapException(final String destHost,
final int destPort,
final String localHost,
final int localPort,
final IOException exception) {
if (exception instanceof BindException) {
return wrapWithMessage(exception,
"Problem binding to ["
+ localHost
+ ":"
+ localPort
+ "] "
+ exception
+ ";"
+ see("BindException"));
} else if (exception instanceof ConnectException) {
// connection refused; include the host:port in the error
return wrapWithMessage(exception,
"Call From "
+ localHost
+ " to "
+ destHost
+ ":"
+ destPort
+ " failed on connection exception: "
+ exception
+ ";"
+ see("ConnectionRefused"));
} else if (exception instanceof UnknownHostException) {
return wrapWithMessage(exception,
"Invalid host name: "
+ getHostDetailsAsString(destHost, destPort, localHost)
+ exception
+ ";"
+ see("UnknownHost"));
} else if (exception instanceof SocketTimeoutException) {
return wrapWithMessage(exception,
"Call From "
+ localHost + " to " + destHost + ":" + destPort
+ " failed on socket timeout exception: " + exception
+ ";"
+ see("SocketTimeout"));
} else if (exception instanceof NoRouteToHostException) {
return wrapWithMessage(exception,
"No Route to Host from "
+ localHost + " to " + destHost + ":" + destPort
+ " failed on socket timeout exception: " + exception
+ ";"
+ see("NoRouteToHost"));
} else if (exception instanceof EOFException) {
return wrapWithMessage(exception,
"End of File Exception between "
+ getHostDetailsAsString(destHost, destPort, localHost)
+ ": " + exception
+ ";"
+ see("EOFException"));
}
else {
return (IOException) new IOException("Failed on local exception: "
+ exception
+ "; Host Details : "
+ getHostDetailsAsString(destHost, destPort, localHost))
.initCause(exception);
}
}
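  // Usage sketch (illustrative; host and port values are assumptions): callers
  // typically rethrow low-level socket failures through this helper so that
  // the report names both endpoints.
  //
  //   try {
  //     socket.connect(remoteAddress, timeoutMs);
  //   } catch (IOException e) {
  //     throw NetUtils.wrapException("nn1.example.com", 8020,
  //         "client.example.com", 0, e);
  //   }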
private static String see(final String entry) {
return FOR_MORE_DETAILS_SEE + HADOOP_WIKI + entry;
}
@SuppressWarnings("unchecked")
private static <T extends IOException> T wrapWithMessage(
T exception, String msg) {
Class<? extends Throwable> clazz = exception.getClass();
try {
Constructor<? extends Throwable> ctor = clazz.getConstructor(String.class);
Throwable t = ctor.newInstance(msg);
return (T)(t.initCause(exception));
} catch (Throwable e) {
LOG.warn("Unable to wrap exception of type " +
clazz + ": it has no (String) constructor", e);
return exception;
}
}
/**
* Get the host details as a string
   * @param destHost destination host (nullable)
* @param destPort destination port
* @param localHost local host (nullable)
* @return a string describing the destination host:port and the local host
*/
private static String getHostDetailsAsString(final String destHost,
final int destPort,
final String localHost) {
StringBuilder hostDetails = new StringBuilder(27);
hostDetails.append("local host is: ")
.append(quoteHost(localHost))
.append("; ");
hostDetails.append("destination host is: ").append(quoteHost(destHost))
.append(":")
.append(destPort).append("; ");
return hostDetails.toString();
}
/**
* Quote a hostname if it is not null
* @param hostname the hostname; nullable
* @return a quoted hostname or {@link #UNKNOWN_HOST} if the hostname is null
*/
private static String quoteHost(final String hostname) {
return (hostname != null) ?
("\"" + hostname + "\"")
: UNKNOWN_HOST;
}
/**
* @return true if the given string is a subnet specified
* using CIDR notation, false otherwise
*/
public static boolean isValidSubnet(String subnet) {
try {
new SubnetUtils(subnet);
return true;
} catch (IllegalArgumentException iae) {
return false;
}
}
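  // Illustrative sketch: validity is purely syntactic, driven by what
  // SubnetUtils accepts as CIDR notation.
  //
  //   NetUtils.isValidSubnet("192.168.0.0/24"); // -> true
  //   NetUtils.isValidSubnet("192.168.0.0");    // -> false (no mask)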
/**
* Add all addresses associated with the given nif in the
* given subnet to the given list.
*/
private static void addMatchingAddrs(NetworkInterface nif,
SubnetInfo subnetInfo, List<InetAddress> addrs) {
Enumeration<InetAddress> ifAddrs = nif.getInetAddresses();
while (ifAddrs.hasMoreElements()) {
InetAddress ifAddr = ifAddrs.nextElement();
if (subnetInfo.isInRange(ifAddr.getHostAddress())) {
addrs.add(ifAddr);
}
}
}
/**
* Return an InetAddress for each interface that matches the
* given subnet specified using CIDR notation.
*
* @param subnet subnet specified using CIDR notation
* @param returnSubinterfaces
* whether to return IPs associated with subinterfaces
* @throws IllegalArgumentException if subnet is invalid
*/
public static List<InetAddress> getIPs(String subnet,
boolean returnSubinterfaces) {
List<InetAddress> addrs = new ArrayList<InetAddress>();
SubnetInfo subnetInfo = new SubnetUtils(subnet).getInfo();
Enumeration<NetworkInterface> nifs;
try {
nifs = NetworkInterface.getNetworkInterfaces();
} catch (SocketException e) {
LOG.error("Unable to get host interfaces", e);
return addrs;
}
while (nifs.hasMoreElements()) {
NetworkInterface nif = nifs.nextElement();
// NB: adding addresses even if the nif is not up
addMatchingAddrs(nif, subnetInfo, addrs);
if (!returnSubinterfaces) {
continue;
}
Enumeration<NetworkInterface> subNifs = nif.getSubInterfaces();
while (subNifs.hasMoreElements()) {
addMatchingAddrs(subNifs.nextElement(), subnetInfo, addrs);
}
}
return addrs;
}
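  // Usage sketch (illustrative; the subnet is an assumption): list every local
  // address inside a management subnet, including sub-interface addresses.
  //
  //   for (InetAddress addr : NetUtils.getIPs("10.0.0.0/8", true)) {
  //     System.out.println(addr.getHostAddress());
  //   }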
/**
* Return a free port number. There is no guarantee it will remain free, so
* it should be used immediately.
*
   * @return A free port for binding a local socket
*/
public static int getFreeSocketPort() {
int port = 0;
try {
ServerSocket s = new ServerSocket(0);
port = s.getLocalPort();
s.close();
return port;
} catch (IOException e) {
// Could not get a free port. Return default port 0.
}
return port;
}
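  // Usage sketch (illustrative): the port may be taken by another process
  // between this call and a later bind(), so callers should be prepared to
  // retry.
  //
  //   int port = NetUtils.getFreeSocketPort();
  //   ServerSocket server = new ServerSocket(port); // may still race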
}
| 33,497 | 35.529989 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.io.FilterInputStream;
import java.io.InputStream;
import java.net.Socket;
import java.net.SocketException;
import java.nio.channels.ReadableByteChannel;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Preconditions;
/**
* A wrapper stream around a socket which allows setting of its timeout. If the
* socket has a channel, this uses non-blocking IO via the package-private
* {@link SocketInputStream} implementation. Otherwise, timeouts are managed by
* setting the underlying socket timeout itself.
*/
@InterfaceAudience.LimitedPrivate("HDFS")
@InterfaceStability.Unstable
public class SocketInputWrapper extends FilterInputStream {
private final Socket socket;
private final boolean hasChannel;
SocketInputWrapper(Socket s, InputStream is) {
super(is);
this.socket = s;
this.hasChannel = s.getChannel() != null;
if (hasChannel) {
Preconditions.checkArgument(is instanceof SocketInputStream,
"Expected a SocketInputStream when there is a channel. " +
"Got: %s", is);
}
}
/**
* Set the timeout for reads from this stream.
*
* Note: the behavior here can differ subtly depending on whether the
* underlying socket has an associated Channel. In particular, if there is no
* channel, then this call will affect the socket timeout for <em>all</em>
* readers of this socket. If there is a channel, then this call will affect
* the timeout only for <em>this</em> stream. As such, it is recommended to
* only create one {@link SocketInputWrapper} instance per socket.
*
* @param timeoutMs
* the new timeout, 0 for no timeout
* @throws SocketException
* if the timeout cannot be set
*/
public void setTimeout(long timeoutMs) throws SocketException {
if (hasChannel) {
((SocketInputStream)in).setTimeout(timeoutMs);
} else {
socket.setSoTimeout((int)timeoutMs);
}
}
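  // Usage sketch (illustrative): a per-stream timeout only holds when the
  // socket has a channel; otherwise SO_TIMEOUT on the shared socket changes
  // for every reader.
  //
  //   SocketInputWrapper in = NetUtils.getInputStream(socket);
  //   in.setTimeout(30000); // 30 second read timeout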
/**
* @return an underlying ReadableByteChannel implementation.
* @throws IllegalStateException if this socket does not have a channel
*/
public ReadableByteChannel getReadableByteChannel() {
Preconditions.checkState(hasChannel,
"Socket %s does not have a channel",
this.socket);
return (SocketInputStream)in;
}
}
| 3,220 | 35.602273 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/AbstractDNSToSwitchMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
* This is a base class for DNS to Switch mappings. <p/> It is not mandatory to
* derive {@link DNSToSwitchMapping} implementations from it, but it is strongly
* recommended, as it makes it easy for the Hadoop developers to add new methods
* to this base class that are automatically picked up by all implementations.
* <p/>
*
* This class does not extend the <code>Configured</code>
* base class, and should not be changed to do so, as it causes problems
* for subclasses. The constructor of the <code>Configured</code> calls
* the {@link #setConf(Configuration)} method, which will call into the
* subclasses before they have been fully constructed.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class AbstractDNSToSwitchMapping
implements DNSToSwitchMapping, Configurable {
private Configuration conf;
/**
* Create an unconfigured instance
*/
protected AbstractDNSToSwitchMapping() {
}
/**
* Create an instance, caching the configuration file.
* This constructor does not call {@link #setConf(Configuration)}; if
* a subclass extracts information in that method, it must call it explicitly.
* @param conf the configuration
*/
protected AbstractDNSToSwitchMapping(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return conf;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
/**
* Predicate that indicates that the switch mapping is known to be
* single-switch. The base class returns false: it assumes all mappings are
* multi-rack. Subclasses may override this with methods that are more aware
* of their topologies.
*
* <p/>
*
   * This method is used when parts of Hadoop need to know whether to apply
   * single-rack or multi-rack policies, such as during block placement.
   * Such algorithms behave differently if they are on multi-switch systems.
*
* @return true if the mapping thinks that it is on a single switch
*/
public boolean isSingleSwitch() {
return false;
}
/**
* Get a copy of the map (for diagnostics)
* @return a clone of the map or null for none known
*/
public Map<String, String> getSwitchMap() {
return null;
}
/**
* Generate a string listing the switch mapping implementation,
* the mapping for every known node and the number of nodes and
* unique switches known about -each entry to a separate line.
* @return a string that can be presented to the ops team or used in
* debug messages.
*/
public String dumpTopology() {
Map<String, String> rack = getSwitchMap();
StringBuilder builder = new StringBuilder();
builder.append("Mapping: ").append(toString()).append("\n");
if (rack != null) {
builder.append("Map:\n");
Set<String> switches = new HashSet<String>();
for (Map.Entry<String, String> entry : rack.entrySet()) {
builder.append(" ")
.append(entry.getKey())
.append(" -> ")
.append(entry.getValue())
.append("\n");
switches.add(entry.getValue());
}
builder.append("Nodes: ").append(rack.size()).append("\n");
builder.append("Switches: ").append(switches.size()).append("\n");
} else {
builder.append("No topology information");
}
return builder.toString();
}
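  // Usage sketch (illustrative; `mapping` stands for any concrete subclass
  // instance and LOG for any available logger): the dump is aimed at logs a
  // human will read, one mapping per line plus node and switch counts.
  //
  //   LOG.info(mapping.dumpTopology());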
protected boolean isSingleSwitchByScriptPolicy() {
return conf != null
&& conf.get(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null;
}
/**
* Query for a {@link DNSToSwitchMapping} instance being on a single
* switch.
* <p/>
* This predicate simply assumes that all mappings not derived from
* this class are multi-switch.
   * @param mapping the mapping to query
   * @return true if the mapping is derived from this class and reports
   * itself as single-switch; mappings not derived from this class are
   * assumed to be multi-switch
*/
public static boolean isMappingSingleSwitch(DNSToSwitchMapping mapping) {
    return mapping instanceof AbstractDNSToSwitchMapping
        && ((AbstractDNSToSwitchMapping) mapping).isSingleSwitch();
}
}
| 5,336 | 33.211538 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * The class extends NetworkTopology to represent a cluster of computers with
 * a four-layer hierarchical network topology.
 * In this network topology, leaves represent data nodes (computers) and inner
 * nodes represent switches/routers that manage traffic in/out of data centers,
 * racks or physical hosts (with virtual switches).
*
* @see NetworkTopology
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class NetworkTopologyWithNodeGroup extends NetworkTopology {
public final static String DEFAULT_NODEGROUP = "/default-nodegroup";
public NetworkTopologyWithNodeGroup() {
clusterMap = new InnerNodeWithNodeGroup(InnerNode.ROOT);
}
@Override
protected Node getNodeForNetworkLocation(Node node) {
// if node only with default rack info, here we need to add default
// nodegroup info
if (NetworkTopology.DEFAULT_RACK.equals(node.getNetworkLocation())) {
node.setNetworkLocation(node.getNetworkLocation()
+ DEFAULT_NODEGROUP);
}
Node nodeGroup = getNode(node.getNetworkLocation());
if (nodeGroup == null) {
nodeGroup = new InnerNodeWithNodeGroup(node.getNetworkLocation());
}
return getNode(nodeGroup.getNetworkLocation());
}
@Override
public String getRack(String loc) {
netlock.readLock().lock();
try {
loc = InnerNode.normalize(loc);
Node locNode = getNode(loc);
if (locNode instanceof InnerNodeWithNodeGroup) {
InnerNodeWithNodeGroup node = (InnerNodeWithNodeGroup) locNode;
if (node.isRack()) {
return loc;
} else if (node.isNodeGroup()) {
return node.getNetworkLocation();
} else {
// may be a data center
return null;
}
} else {
// not in cluster map, don't handle it
return loc;
}
} finally {
netlock.readLock().unlock();
}
}
/**
   * Given a string representation of a network location, return the node
   * group it belongs to
*
* @param loc
* a path-like string representation of a network location
* @return a node group string
*/
public String getNodeGroup(String loc) {
netlock.readLock().lock();
try {
loc = InnerNode.normalize(loc);
Node locNode = getNode(loc);
if (locNode instanceof InnerNodeWithNodeGroup) {
InnerNodeWithNodeGroup node = (InnerNodeWithNodeGroup) locNode;
if (node.isNodeGroup()) {
return loc;
} else if (node.isRack()) {
          // a rack has no single well-defined node group
return null;
} else {
// may be a leaf node
return getNodeGroup(node.getNetworkLocation());
}
} else {
// not in cluster map, don't handle it
return loc;
}
} finally {
netlock.readLock().unlock();
}
}
@Override
public boolean isOnSameRack( Node node1, Node node2) {
if (node1 == null || node2 == null ||
node1.getParent() == null || node2.getParent() == null) {
return false;
}
netlock.readLock().lock();
try {
return isSameParents(node1.getParent(), node2.getParent());
} finally {
netlock.readLock().unlock();
}
}
/**
   * Check if two nodes are on the same node group (hypervisor). The
   * assumption here is that both nodes are leaf nodes.
*
* @param node1
* one node (can be null)
* @param node2
* another node (can be null)
   * @return true if node1 and node2 are on the same node group; false
   *         otherwise (including when either node is null)
*/
@Override
public boolean isOnSameNodeGroup(Node node1, Node node2) {
if (node1 == null || node2 == null) {
return false;
}
netlock.readLock().lock();
try {
return isSameParents(node1, node2);
} finally {
netlock.readLock().unlock();
}
}
/**
* Check if network topology is aware of NodeGroup
*/
@Override
public boolean isNodeGroupAware() {
return true;
}
/** Add a leaf node
* Update node counter & rack counter if necessary
* @param node node to be added; can be null
   * @exception IllegalArgumentException if the node to be added is an
   *   inner node rather than a leaf
*/
@Override
public void add(Node node) {
if (node==null) return;
if( node instanceof InnerNode ) {
      throw new IllegalArgumentException(
          "Not allowed to add an inner node: "+NodeBase.getPath(node));
}
netlock.writeLock().lock();
try {
Node rack = null;
// if node only with default rack info, here we need to add default
// nodegroup info
if (NetworkTopology.DEFAULT_RACK.equals(node.getNetworkLocation())) {
node.setNetworkLocation(node.getNetworkLocation() +
NetworkTopologyWithNodeGroup.DEFAULT_NODEGROUP);
}
Node nodeGroup = getNode(node.getNetworkLocation());
if (nodeGroup == null) {
nodeGroup = new InnerNodeWithNodeGroup(node.getNetworkLocation());
}
rack = getNode(nodeGroup.getNetworkLocation());
      // rack should be an InnerNode and have a parent.
      // note: the null-parent case for rack arises when the node's topology
      // has only one layer, so rack is recognized as "/" with no parent.
      // Such a node is treated as having a faulty topology.
if (rack != null &&
(!(rack instanceof InnerNode) || rack.getParent() == null)) {
throw new IllegalArgumentException("Unexpected data node "
+ node.toString()
+ " at an illegal network location");
}
if (clusterMap.add(node)) {
LOG.info("Adding a new node: " + NodeBase.getPath(node));
if (rack == null) {
// We only track rack number here
numOfRacks++;
}
}
if(LOG.isDebugEnabled()) {
LOG.debug("NetworkTopology became:\n" + this.toString());
}
} finally {
netlock.writeLock().unlock();
}
}
/** Remove a node
* Update node counter and rack counter if necessary
* @param node node to be removed; can be null
*/
@Override
public void remove(Node node) {
if (node==null) return;
if( node instanceof InnerNode ) {
      throw new IllegalArgumentException(
          "Not allowed to remove an inner node: "+NodeBase.getPath(node));
}
LOG.info("Removing a node: "+NodeBase.getPath(node));
netlock.writeLock().lock();
try {
if (clusterMap.remove(node)) {
Node nodeGroup = getNode(node.getNetworkLocation());
if (nodeGroup == null) {
nodeGroup = new InnerNode(node.getNetworkLocation());
}
InnerNode rack = (InnerNode)getNode(nodeGroup.getNetworkLocation());
if (rack == null) {
numOfRacks--;
}
}
if(LOG.isDebugEnabled()) {
LOG.debug("NetworkTopology became:\n" + this.toString());
}
} finally {
netlock.writeLock().unlock();
}
}
@Override
protected int getWeight(Node reader, Node node) {
// 0 is local, 1 is same node group, 2 is same rack, 3 is off rack
// Start off by initializing to off rack
int weight = 3;
if (reader != null) {
if (reader.equals(node)) {
weight = 0;
} else if (isOnSameNodeGroup(reader, node)) {
weight = 1;
} else if (isOnSameRack(reader, node)) {
weight = 2;
}
}
return weight;
}
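  // Worked example (illustrative topology): with reader /d1/r1/ng1/n1, a
  // replica on /d1/r1/ng1/n2 weighs 1 (same node group), /d1/r1/ng2/n3
  // weighs 2 (same rack), and /d1/r2/ng3/n4 weighs 3 (off rack).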
/**
* Sort nodes array by their distances to <i>reader</i>.
* <p/>
* This is the same as {@link NetworkTopology#sortByDistance(Node, Node[],
* int)} except with a four-level network topology which contains the
* additional network distance of a "node group" which is between local and
* same rack.
*
* @param reader Node where data will be read
* @param nodes Available replicas with the requested data
* @param activeLen Number of active nodes at the front of the array
*/
@Override
public void sortByDistance(Node reader, Node[] nodes, int activeLen) {
// If reader is not a datanode (not in NetworkTopology tree), we need to
// replace this reader with a sibling leaf node in tree.
if (reader != null && !this.contains(reader)) {
Node nodeGroup = getNode(reader.getNetworkLocation());
if (nodeGroup != null && nodeGroup instanceof InnerNode) {
InnerNode parentNode = (InnerNode) nodeGroup;
// replace reader with the first children of its parent in tree
reader = parentNode.getLeaf(0, null);
} else {
return;
}
}
super.sortByDistance(reader, nodes, activeLen);
}
/** InnerNodeWithNodeGroup represents a switch/router of a data center, rack
* or physical host. Different from a leaf node, it has non-null children.
*/
static class InnerNodeWithNodeGroup extends InnerNode {
public InnerNodeWithNodeGroup(String name, String location,
InnerNode parent, int level) {
super(name, location, parent, level);
}
public InnerNodeWithNodeGroup(String name, String location) {
super(name, location);
}
public InnerNodeWithNodeGroup(String path) {
super(path);
}
@Override
boolean isRack() {
      // an empty inner node is treated as a node group, not a rack
if (getChildren().isEmpty()) {
return false;
}
Node firstChild = children.get(0);
if (firstChild instanceof InnerNode) {
Node firstGrandChild = (((InnerNode) firstChild).children).get(0);
if (firstGrandChild instanceof InnerNode) {
// it is datacenter
return false;
} else {
return true;
}
}
return false;
}
/**
* Judge if this node represents a node group
*
* @return true if it has no child or its children are not InnerNodes
*/
boolean isNodeGroup() {
if (children.isEmpty()) {
return true;
}
Node firstChild = children.get(0);
if (firstChild instanceof InnerNode) {
// it is rack or datacenter
return false;
}
return true;
}
@Override
protected boolean isLeafParent() {
return isNodeGroup();
}
@Override
protected InnerNode createParentNode(String parentName) {
return new InnerNodeWithNodeGroup(parentName, getPath(this), this,
this.getLevel() + 1);
}
@Override
protected boolean areChildrenLeaves() {
return isNodeGroup();
}
}
}
| 11,591 | 30.414634 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.TreeMap;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
/** The class represents a cluster of computers with a tree hierarchical
 * network topology.
 * For example, a cluster may consist of many data centers filled
 * with racks of computers.
* In a network topology, leaves represent data nodes (computers) and inner
* nodes represent switches/routers that manage traffic in/out of data centers
* or racks.
*
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class NetworkTopology {
public final static String DEFAULT_RACK = "/default-rack";
public final static int DEFAULT_HOST_LEVEL = 2;
public static final Log LOG =
LogFactory.getLog(NetworkTopology.class);
public static class InvalidTopologyException extends RuntimeException {
private static final long serialVersionUID = 1L;
public InvalidTopologyException(String msg) {
super(msg);
}
}
/**
* Get an instance of NetworkTopology based on the value of the configuration
* parameter net.topology.impl.
*
* @param conf the configuration to be used
* @return an instance of NetworkTopology
*/
public static NetworkTopology getInstance(Configuration conf){
return ReflectionUtils.newInstance(
conf.getClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
NetworkTopology.class, NetworkTopology.class), conf);
}
/** InnerNode represents a switch/router of a data center or rack.
* Different from a leaf node, it has non-null children.
*/
static class InnerNode extends NodeBase {
protected List<Node> children=new ArrayList<Node>();
private Map<String, Node> childrenMap = new HashMap<String, Node>();
private int numOfLeaves;
/** Construct an InnerNode from a path-like string */
InnerNode(String path) {
super(path);
}
/** Construct an InnerNode from its name and its network location */
InnerNode(String name, String location) {
super(name, location);
}
/** Construct an InnerNode
* from its name, its network location, its parent, and its level */
InnerNode(String name, String location, InnerNode parent, int level) {
super(name, location, parent, level);
}
/** @return its children */
List<Node> getChildren() {return children;}
/** @return the number of children this node has */
int getNumOfChildren() {
return children.size();
}
/** Judge if this node represents a rack
* @return true if it has no child or its children are not InnerNodes
*/
boolean isRack() {
if (children.isEmpty()) {
return true;
}
Node firstChild = children.get(0);
if (firstChild instanceof InnerNode) {
return false;
}
return true;
}
/** Judge if this node is an ancestor of node <i>n</i>
*
* @param n a node
* @return true if this node is an ancestor of <i>n</i>
*/
boolean isAncestor(Node n) {
return getPath(this).equals(NodeBase.PATH_SEPARATOR_STR) ||
(n.getNetworkLocation()+NodeBase.PATH_SEPARATOR_STR).
startsWith(getPath(this)+NodeBase.PATH_SEPARATOR_STR);
}
/** Judge if this node is the parent of node <i>n</i>
*
* @param n a node
* @return true if this node is the parent of <i>n</i>
*/
boolean isParent(Node n) {
return n.getNetworkLocation().equals(getPath(this));
}
    /* Return a child name of this node that is an ancestor of node <i>n</i> */
private String getNextAncestorName(Node n) {
if (!isAncestor(n)) {
        throw new IllegalArgumentException(
            this + " is not an ancestor of " + n);
}
String name = n.getNetworkLocation().substring(getPath(this).length());
if (name.charAt(0) == PATH_SEPARATOR) {
name = name.substring(1);
}
int index=name.indexOf(PATH_SEPARATOR);
if (index !=-1)
name = name.substring(0, index);
return name;
}
/** Add node <i>n</i> to the subtree of this node
* @param n node to be added
* @return true if the node is added; false otherwise
*/
boolean add(Node n) {
if (!isAncestor(n))
        throw new IllegalArgumentException(n.getName()+", which is located at "
            +n.getNetworkLocation()+", is not a descendant of "
            +getPath(this));
if (isParent(n)) {
// this node is the parent of n; add n directly
n.setParent(this);
n.setLevel(this.level+1);
Node prev = childrenMap.put(n.getName(), n);
if (prev != null) {
for(int i=0; i<children.size(); i++) {
if (children.get(i).getName().equals(n.getName())) {
children.set(i, n);
return false;
}
}
}
children.add(n);
numOfLeaves++;
return true;
} else {
// find the next ancestor node
String parentName = getNextAncestorName(n);
InnerNode parentNode = (InnerNode)childrenMap.get(parentName);
if (parentNode == null) {
// create a new InnerNode
parentNode = createParentNode(parentName);
children.add(parentNode);
childrenMap.put(parentNode.getName(), parentNode);
}
// add n to the subtree of the next ancestor node
if (parentNode.add(n)) {
numOfLeaves++;
return true;
} else {
return false;
}
}
}
/**
* Creates a parent node to be added to the list of children.
* Creates a node using the InnerNode four argument constructor specifying
* the name, location, parent, and level of this node.
*
* <p>To be overridden in subclasses for specific InnerNode implementations,
* as alternative to overriding the full {@link #add(Node)} method.
*
* @param parentName The name of the parent node
* @return A new inner node
* @see InnerNode#InnerNode(String, String, InnerNode, int)
*/
protected InnerNode createParentNode(String parentName) {
return new InnerNode(parentName, getPath(this), this, this.getLevel()+1);
}
/** Remove node <i>n</i> from the subtree of this node
* @param n node to be deleted
* @return true if the node is deleted; false otherwise
*/
boolean remove(Node n) {
String parent = n.getNetworkLocation();
String currentPath = getPath(this);
if (!isAncestor(n))
throw new IllegalArgumentException(n.getName()
+", which is located at "
+parent+", is not a descendent of "+currentPath);
if (isParent(n)) {
// this node is the parent of n; remove n directly
if (childrenMap.containsKey(n.getName())) {
for (int i=0; i<children.size(); i++) {
if (children.get(i).getName().equals(n.getName())) {
children.remove(i);
childrenMap.remove(n.getName());
numOfLeaves--;
n.setParent(null);
return true;
}
}
}
return false;
} else {
// find the next ancestor node: the parent node
String parentName = getNextAncestorName(n);
InnerNode parentNode = null;
int i;
for(i=0; i<children.size(); i++) {
if (children.get(i).getName().equals(parentName)) {
parentNode = (InnerNode)children.get(i);
break;
}
}
if (parentNode==null) {
return false;
}
// remove n from the parent node
boolean isRemoved = parentNode.remove(n);
// if the parent node has no children, remove the parent node too
if (isRemoved) {
if (parentNode.getNumOfChildren() == 0) {
Node prev = children.remove(i);
childrenMap.remove(prev.getName());
}
numOfLeaves--;
}
return isRemoved;
}
} // end of remove
/** Given a node's string representation, return a reference to the node
* @param loc string location of the form /rack/node
     * @return null if the node is not found or the child node is there but
* not an instance of {@link InnerNode}
*/
private Node getLoc(String loc) {
if (loc == null || loc.length() == 0) return this;
String[] path = loc.split(PATH_SEPARATOR_STR, 2);
Node childnode = childrenMap.get(path[0]);
if (childnode == null) return null; // non-existing node
if (path.length == 1) return childnode;
if (childnode instanceof InnerNode) {
return ((InnerNode)childnode).getLoc(path[1]);
} else {
return null;
}
}
    /** Get the <i>leafIndex</i>-th leaf of this subtree, skipping any leaves
     * under <i>excludedNode</i>
     *
     * @param leafIndex an indexed leaf of the node
     * @param excludedNode an excluded node (can be null)
     * @return the leaf at that index, or null if the index is out of range
*/
Node getLeaf(int leafIndex, Node excludedNode) {
int count=0;
// check if the excluded node a leaf
boolean isLeaf =
excludedNode == null || !(excludedNode instanceof InnerNode);
// calculate the total number of excluded leaf nodes
int numOfExcludedLeaves =
isLeaf ? 1 : ((InnerNode)excludedNode).getNumOfLeaves();
if (isLeafParent()) { // children are leaves
if (isLeaf) { // excluded node is a leaf node
if (excludedNode != null &&
childrenMap.containsKey(excludedNode.getName())) {
int excludedIndex = children.indexOf(excludedNode);
if (excludedIndex != -1 && leafIndex >= 0) {
// excluded node is one of the children so adjust the leaf index
leafIndex = leafIndex>=excludedIndex ? leafIndex+1 : leafIndex;
}
}
}
// range check
if (leafIndex<0 || leafIndex>=this.getNumOfChildren()) {
return null;
}
return children.get(leafIndex);
} else {
for(int i=0; i<children.size(); i++) {
InnerNode child = (InnerNode)children.get(i);
if (excludedNode == null || excludedNode != child) {
// not the excludedNode
int numOfLeaves = child.getNumOfLeaves();
if (excludedNode != null && child.isAncestor(excludedNode)) {
numOfLeaves -= numOfExcludedLeaves;
}
if (count+numOfLeaves > leafIndex) {
// the leaf is in the child subtree
return child.getLeaf(leafIndex-count, excludedNode);
} else {
// go to the next child
count = count+numOfLeaves;
}
        } else { // it is the excludedNode
// skip it and set the excludedNode to be null
excludedNode = null;
}
}
return null;
}
}
protected boolean isLeafParent() {
return isRack();
}
/**
     * Determine if the children are leaves; the default implementation calls {@link #isRack()}
* <p>To be overridden in subclasses for specific InnerNode implementations,
* as alternative to overriding the full {@link #getLeaf(int, Node)} method.
*
* @return true if children are leaves, false otherwise
*/
protected boolean areChildrenLeaves() {
return isRack();
}
/**
* Get number of leaves.
*/
int getNumOfLeaves() {
return numOfLeaves;
}
} // end of InnerNode
/**
* the root cluster map
*/
InnerNode clusterMap;
/** Depth of all leaf nodes */
private int depthOfAllLeaves = -1;
/** rack counter */
protected int numOfRacks = 0;
/** the lock used to manage access */
protected ReadWriteLock netlock = new ReentrantReadWriteLock();
public NetworkTopology() {
clusterMap = new InnerNode(InnerNode.ROOT);
}
/** Add a leaf node
* Update node counter & rack counter if necessary
* @param node node to be added; can be null
   * @exception IllegalArgumentException if the node to be added is an
   *   inner node rather than a leaf
*/
public void add(Node node) {
if (node==null) return;
int newDepth = NodeBase.locationToDepth(node.getNetworkLocation()) + 1;
netlock.writeLock().lock();
try {
if( node instanceof InnerNode ) {
        throw new IllegalArgumentException(
            "Not allowed to add an inner node: "+NodeBase.getPath(node));
}
if ((depthOfAllLeaves != -1) && (depthOfAllLeaves != newDepth)) {
LOG.error("Error: can't add leaf node " + NodeBase.getPath(node) +
" at depth " + newDepth + " to topology:\n" + this.toString());
throw new InvalidTopologyException("Failed to add " + NodeBase.getPath(node) +
": You cannot have a rack and a non-rack node at the same " +
"level of the network topology.");
}
Node rack = getNodeForNetworkLocation(node);
if (rack != null && !(rack instanceof InnerNode)) {
throw new IllegalArgumentException("Unexpected data node "
+ node.toString()
+ " at an illegal network location");
}
if (clusterMap.add(node)) {
LOG.info("Adding a new node: "+NodeBase.getPath(node));
if (rack == null) {
numOfRacks++;
}
if (!(node instanceof InnerNode)) {
if (depthOfAllLeaves == -1) {
depthOfAllLeaves = node.getLevel();
}
}
}
if(LOG.isDebugEnabled()) {
LOG.debug("NetworkTopology became:\n" + this.toString());
}
} finally {
netlock.writeLock().unlock();
}
}
/**
* Return a reference to the node given its string representation.
* Default implementation delegates to {@link #getNode(String)}.
*
* <p>To be overridden in subclasses for specific NetworkTopology
* implementations, as alternative to overriding the full {@link #add(Node)}
* method.
*
* @param node The string representation of this node's network location is
* used to retrieve a Node object.
* @return a reference to the node; null if the node is not in the tree
*
* @see #add(Node)
* @see #getNode(String)
*/
protected Node getNodeForNetworkLocation(Node node) {
return getNode(node.getNetworkLocation());
}
/**
* Given a string representation of a rack, return its children
* @param loc a path-like string representation of a rack
* @return a newly allocated list with all the node's children
*/
public List<Node> getDatanodesInRack(String loc) {
netlock.readLock().lock();
try {
loc = NodeBase.normalize(loc);
if (!NodeBase.ROOT.equals(loc)) {
loc = loc.substring(1);
}
InnerNode rack = (InnerNode) clusterMap.getLoc(loc);
if (rack == null) {
return null;
}
return new ArrayList<Node>(rack.getChildren());
} finally {
netlock.readLock().unlock();
}
}
/** Remove a node
* Update node counter and rack counter if necessary
* @param node node to be removed; can be null
*/
public void remove(Node node) {
if (node==null) return;
if( node instanceof InnerNode ) {
      throw new IllegalArgumentException(
          "Not allowed to remove an inner node: "+NodeBase.getPath(node));
}
LOG.info("Removing a node: "+NodeBase.getPath(node));
netlock.writeLock().lock();
try {
if (clusterMap.remove(node)) {
InnerNode rack = (InnerNode)getNode(node.getNetworkLocation());
if (rack == null) {
numOfRacks--;
}
}
if(LOG.isDebugEnabled()) {
LOG.debug("NetworkTopology became:\n" + this.toString());
}
} finally {
netlock.writeLock().unlock();
}
}
/** Check if the tree contains node <i>node</i>
*
* @param node a node
* @return true if <i>node</i> is already in the tree; false otherwise
*/
public boolean contains(Node node) {
if (node == null) return false;
netlock.readLock().lock();
try {
Node parent = node.getParent();
for (int level = node.getLevel(); parent != null && level > 0;
parent = parent.getParent(), level--) {
if (parent == clusterMap) {
return true;
}
}
} finally {
netlock.readLock().unlock();
}
return false;
}
/** Given a string representation of a node, return its reference
*
* @param loc
* a path-like string representation of a node
* @return a reference to the node; null if the node is not in the tree
*/
public Node getNode(String loc) {
netlock.readLock().lock();
try {
loc = NodeBase.normalize(loc);
if (!NodeBase.ROOT.equals(loc))
loc = loc.substring(1);
return clusterMap.getLoc(loc);
} finally {
netlock.readLock().unlock();
}
}
/** Given a string representation of a rack for a specific network
* location
*
* To be overridden in subclasses for specific NetworkTopology
* implementations, as alternative to overriding the full
* {@link #getRack(String)} method.
* @param loc
* a path-like string representation of a network location
* @return a rack string
*/
public String getRack(String loc) {
return loc;
}
/** @return the total number of racks */
public int getNumOfRacks() {
netlock.readLock().lock();
try {
return numOfRacks;
} finally {
netlock.readLock().unlock();
}
}
/** @return the total number of leaf nodes */
public int getNumOfLeaves() {
netlock.readLock().lock();
try {
return clusterMap.getNumOfLeaves();
} finally {
netlock.readLock().unlock();
}
}
  /** Return the distance between two nodes.
   * It is assumed that the distance from one node to its parent is 1.
* The distance between two nodes is calculated by summing up their distances
* to their closest common ancestor.
* @param node1 one node
* @param node2 another node
* @return the distance between node1 and node2 which is zero if they are the same
* or {@link Integer#MAX_VALUE} if node1 or node2 do not belong to the cluster
*/
public int getDistance(Node node1, Node node2) {
if (node1 == node2) {
return 0;
}
Node n1=node1, n2=node2;
int dis = 0;
netlock.readLock().lock();
try {
int level1=node1.getLevel(), level2=node2.getLevel();
while(n1!=null && level1>level2) {
n1 = n1.getParent();
level1--;
dis++;
}
while(n2!=null && level2>level1) {
n2 = n2.getParent();
level2--;
dis++;
}
while(n1!=null && n2!=null && n1.getParent()!=n2.getParent()) {
n1=n1.getParent();
n2=n2.getParent();
dis+=2;
}
} finally {
netlock.readLock().unlock();
}
if (n1==null) {
LOG.warn("The cluster does not contain node: "+NodeBase.getPath(node1));
return Integer.MAX_VALUE;
}
if (n2==null) {
LOG.warn("The cluster does not contain node: "+NodeBase.getPath(node2));
return Integer.MAX_VALUE;
}
return dis+2;
}
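  // Worked example (illustrative topology): for nodes /d1/r1/n1 and /d1/r2/n2
  // each hop to a parent costs 1, so the distance is
  // 1 (n1 -> r1) + 1 (n2 -> r2) + 2 (r1 and r2 meet at ancestor d1) = 4.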
/** Check if two nodes are on the same rack
* @param node1 one node (can be null)
* @param node2 another node (can be null)
* @return true if node1 and node2 are on the same rack; false otherwise
* @exception IllegalArgumentException when either node1 or node2 is null, or
* node1 or node2 do not belong to the cluster
*/
public boolean isOnSameRack( Node node1, Node node2) {
if (node1 == null || node2 == null) {
return false;
}
netlock.readLock().lock();
try {
return isSameParents(node1, node2);
} finally {
netlock.readLock().unlock();
}
}
/**
* Check if network topology is aware of NodeGroup
*/
public boolean isNodeGroupAware() {
return false;
}
/**
   * Return false directly as this topology is not aware of NodeGroup; to be overridden in subclasses
*/
public boolean isOnSameNodeGroup(Node node1, Node node2) {
return false;
}
/**
* Compare the parents of each node for equality
*
* <p>To be overridden in subclasses for specific NetworkTopology
* implementations, as alternative to overriding the full
* {@link #isOnSameRack(Node, Node)} method.
*
* @param node1 the first node to compare
* @param node2 the second node to compare
* @return true if their parents are equal, false otherwise
*
* @see #isOnSameRack(Node, Node)
*/
protected boolean isSameParents(Node node1, Node node2) {
return node1.getParent()==node2.getParent();
}
private static final Random r = new Random();
@VisibleForTesting
void setRandomSeed(long seed) {
r.setSeed(seed);
}
  /** Randomly choose one node from <i>scope</i>.
   * If scope starts with ~, choose one from all nodes except for the
   * ones in <i>scope</i>; otherwise, choose one from <i>scope</i>.
* @param scope range of nodes from which a node will be chosen
* @return the chosen node
*/
public Node chooseRandom(String scope) {
netlock.readLock().lock();
try {
if (scope.startsWith("~")) {
return chooseRandom(NodeBase.ROOT, scope.substring(1));
} else {
return chooseRandom(scope, null);
}
} finally {
netlock.readLock().unlock();
}
}
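  // Usage sketch (illustrative scopes): pick any leaf under rack /d1/r1, or
  // any leaf outside of it.
  //
  //   Node inRack  = topology.chooseRandom("/d1/r1");
  //   Node offRack = topology.chooseRandom("~/d1/r1");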
private Node chooseRandom(String scope, String excludedScope){
if (excludedScope != null) {
if (scope.startsWith(excludedScope)) {
return null;
}
if (!excludedScope.startsWith(scope)) {
excludedScope = null;
}
}
Node node = getNode(scope);
if (!(node instanceof InnerNode)) {
return node;
}
InnerNode innerNode = (InnerNode)node;
int numOfDatanodes = innerNode.getNumOfLeaves();
if (excludedScope == null) {
node = null;
} else {
node = getNode(excludedScope);
if (!(node instanceof InnerNode)) {
numOfDatanodes -= 1;
} else {
numOfDatanodes -= ((InnerNode)node).getNumOfLeaves();
}
}
if (numOfDatanodes == 0) {
throw new InvalidTopologyException(
"Failed to find datanode (scope=\"" + String.valueOf(scope) +
"\" excludedScope=\"" + String.valueOf(excludedScope) + "\").");
}
int leaveIndex = r.nextInt(numOfDatanodes);
return innerNode.getLeaf(leaveIndex, node);
}
  /** Return the leaves in <i>scope</i>
   * @param scope a path string
   * @return leaf nodes under the specified scope
*/
public List<Node> getLeaves(String scope) {
Node node = getNode(scope);
List<Node> leafNodes = new ArrayList<Node>();
if (!(node instanceof InnerNode)) {
leafNodes.add(node);
} else {
InnerNode innerNode = (InnerNode) node;
for (int i=0;i<innerNode.getNumOfLeaves();i++) {
leafNodes.add(innerNode.getLeaf(i, null));
}
}
return leafNodes;
}
  /** Return the number of leaves in <i>scope</i> but not in <i>excludedNodes</i>.
   * If scope starts with ~, return the number of nodes that are in
   * neither <i>scope</i> nor <i>excludedNodes</i>.
* @param scope a path string that may start with ~
* @param excludedNodes a list of nodes
* @return number of available nodes
*/
public int countNumOfAvailableNodes(String scope,
Collection<Node> excludedNodes) {
boolean isExcluded=false;
if (scope.startsWith("~")) {
isExcluded=true;
scope=scope.substring(1);
}
scope = NodeBase.normalize(scope);
int excludedCountInScope = 0; // the number of nodes in both scope & excludedNodes
int excludedCountOffScope = 0; // the number of nodes outside scope & excludedNodes
netlock.readLock().lock();
try {
for (Node node : excludedNodes) {
node = getNode(NodeBase.getPath(node));
if (node == null) {
continue;
}
if ((NodeBase.getPath(node) + NodeBase.PATH_SEPARATOR_STR)
.startsWith(scope + NodeBase.PATH_SEPARATOR_STR)) {
excludedCountInScope++;
} else {
excludedCountOffScope++;
}
}
Node n = getNode(scope);
int scopeNodeCount = 0;
if (n != null) {
scopeNodeCount++;
}
if (n instanceof InnerNode) {
scopeNodeCount=((InnerNode)n).getNumOfLeaves();
}
if (isExcluded) {
return clusterMap.getNumOfLeaves() - scopeNodeCount
- excludedCountOffScope;
} else {
return scopeNodeCount - excludedCountInScope;
}
} finally {
netlock.readLock().unlock();
}
}
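  // Usage sketch (illustrative; scope and exclusions are assumptions): count
  // the placement candidates left in a rack once current replica holders are
  // excluded.
  //
  //   int candidates =
  //       topology.countNumOfAvailableNodes("/d1/r1", excludedNodes);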
/** convert a network tree to a string */
@Override
public String toString() {
// print the number of racks
StringBuilder tree = new StringBuilder();
tree.append("Number of racks: ");
tree.append(numOfRacks);
tree.append("\n");
// print the number of leaves
int numOfLeaves = getNumOfLeaves();
tree.append("Expected number of leaves:");
tree.append(numOfLeaves);
tree.append("\n");
// print nodes
for(int i=0; i<numOfLeaves; i++) {
tree.append(NodeBase.getPath(clusterMap.getLeaf(i, null)));
tree.append("\n");
}
return tree.toString();
}
/**
   * Divide a network location string into two parts at the last separator and
   * return the first part.
   *
   * @param networkLocation a path-like network location string
   * @return everything before the last path separator
*/
public static String getFirstHalf(String networkLocation) {
int index = networkLocation.lastIndexOf(NodeBase.PATH_SEPARATOR_STR);
return networkLocation.substring(0, index);
}
/**
   * Divide a network location string into two parts at the last separator and
   * return the second part.
   *
   * @param networkLocation a path-like network location string
   * @return the last path separator and everything after it
*/
public static String getLastHalf(String networkLocation) {
int index = networkLocation.lastIndexOf(NodeBase.PATH_SEPARATOR_STR);
return networkLocation.substring(index);
}
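  // Worked example (assumed location): for "/d1/r1/n1",
  // getFirstHalf(...) returns "/d1/r1" and getLastHalf(...) returns "/n1".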
/**
   * Returns an integer weight which specifies how far away <i>node</i> is
   * from <i>reader</i>. A lower value signifies that a node is closer.
*
* @param reader Node where data will be read
* @param node Replica of data
* @return weight
*/
protected int getWeight(Node reader, Node node) {
// 0 is local, 1 is same rack, 2 is off rack
// Start off by initializing to off rack
int weight = 2;
if (reader != null) {
if (reader.equals(node)) {
weight = 0;
} else if (isOnSameRack(reader, node)) {
weight = 1;
}
}
return weight;
}
/**
* Sort nodes array by network distance to <i>reader</i>.
* <p/>
* In a three-level topology, a node can be either local, on the same rack,
* or on a different rack from the reader. Sorting the nodes based on network
* distance from the reader reduces network traffic and improves
* performance.
* <p/>
* As an additional twist, we also randomize the nodes at each network
* distance. This helps with load balancing when there is data skew.
*
* @param reader Node where data will be read
* @param nodes Available replicas with the requested data
* @param activeLen Number of active nodes at the front of the array
*/
public void sortByDistance(Node reader, Node[] nodes, int activeLen) {
    // Sort weights for the nodes array
int[] weights = new int[activeLen];
for (int i=0; i<activeLen; i++) {
weights[i] = getWeight(reader, nodes[i]);
}
// Add weight/node pairs to a TreeMap to sort
TreeMap<Integer, List<Node>> tree = new TreeMap<Integer, List<Node>>();
for (int i=0; i<activeLen; i++) {
int weight = weights[i];
Node node = nodes[i];
List<Node> list = tree.get(weight);
if (list == null) {
list = Lists.newArrayListWithExpectedSize(1);
tree.put(weight, list);
}
list.add(node);
}
int idx = 0;
for (List<Node> list: tree.values()) {
if (list != null) {
Collections.shuffle(list, r);
for (Node n: list) {
nodes[idx] = n;
idx++;
}
}
}
Preconditions.checkState(idx == activeLen,
"Sorted the wrong number of nodes!");
}
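  // Usage sketch (illustrative): after sorting, replicas are ordered
  // local-first, then same-rack, then off-rack, with ties shuffled for load
  // balancing.
  //
  //   topology.sortByDistance(reader, replicas, replicas.length);
  //   Node cheapest = replicas[0];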
}
| 30,191 | 31.781759 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.util.*;
import java.io.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
/**
* This class implements the {@link DNSToSwitchMapping} interface using a
* script configured via the
* {@link CommonConfigurationKeys#NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY} option.
* <p/>
* It contains a static class <code>RawScriptBasedMapping</code> that performs
* the work: reading the configuration parameters, executing any defined
* script, handling errors and such like. The outer
* class extends {@link CachedDNSToSwitchMapping} to cache the delegated
* queries.
* <p/>
 * This DNS mapper's {@link #isSingleSwitch()} predicate returns
 * true if and only if no script is defined.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ScriptBasedMapping extends CachedDNSToSwitchMapping {
/**
* Minimum number of arguments: {@value}
*/
static final int MIN_ALLOWABLE_ARGS = 1;
/**
* Default number of arguments: {@value}
*/
static final int DEFAULT_ARG_COUNT =
CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_DEFAULT;
/**
* key to the script filename {@value}
*/
static final String SCRIPT_FILENAME_KEY =
CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY ;
/**
* key to the argument count that the script supports
* {@value}
*/
static final String SCRIPT_ARG_COUNT_KEY =
CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY ;
/**
   * Text used in the {@link #toString()} method if there is no script
* {@value}
*/
public static final String NO_SCRIPT = "no script";
/**
* Create an instance with the default configuration.
   * <p/>
   * Calling {@link #setConf(Configuration)} will trigger a
   * re-evaluation of the configuration settings and so can be used to
   * set up the mapping script.
*
*/
public ScriptBasedMapping() {
this(new RawScriptBasedMapping());
}
/**
* Create an instance from the given raw mapping
   * @param rawMap the raw DNSToSwitchMapping to cache
*/
public ScriptBasedMapping(DNSToSwitchMapping rawMap) {
super(rawMap);
}
/**
* Create an instance from the given configuration
* @param conf configuration
*/
public ScriptBasedMapping(Configuration conf) {
this();
setConf(conf);
}
/**
* Get the cached mapping and convert it to its real type
* @return the inner raw script mapping.
*/
private RawScriptBasedMapping getRawMapping() {
return (RawScriptBasedMapping)rawMapping;
}
@Override
public Configuration getConf() {
return getRawMapping().getConf();
}
@Override
public String toString() {
return "script-based mapping with " + getRawMapping().toString();
}
/**
* {@inheritDoc}
* <p/>
   * This will get called in the superclass constructor, so a check is needed
   * to ensure that the raw mapping is defined before trying to relay the
   * (possibly null) configuration to it.
* @param conf
*/
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
getRawMapping().setConf(conf);
}
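  // Usage sketch (illustrative; the script path is an assumption): wire up a
  // topology script and resolve a batch of hosts through the cached mapping.
  //
  //   Configuration conf = new Configuration();
  //   conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
  //       "/etc/hadoop/topology.sh");
  //   DNSToSwitchMapping mapping = new ScriptBasedMapping(conf);
  //   List<String> racks = mapping.resolve(Arrays.asList("host1", "host2"));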
/**
* This is the uncached script mapping that is fed into the cache managed
* by the superclass {@link CachedDNSToSwitchMapping}
*/
protected static class RawScriptBasedMapping
extends AbstractDNSToSwitchMapping {
private String scriptName;
private int maxArgs; //max hostnames per call of the script
private static final Log LOG =
LogFactory.getLog(ScriptBasedMapping.class);
/**
* Set the configuration and extract the configuration parameters of interest
* @param conf the new configuration
*/
@Override
public void setConf (Configuration conf) {
super.setConf(conf);
if (conf != null) {
scriptName = conf.get(SCRIPT_FILENAME_KEY);
maxArgs = conf.getInt(SCRIPT_ARG_COUNT_KEY, DEFAULT_ARG_COUNT);
} else {
scriptName = null;
maxArgs = 0;
}
}
/**
* Constructor. The mapping is not ready to use until
* {@link #setConf(Configuration)} has been called
*/
public RawScriptBasedMapping() {}
@Override
public List<String> resolve(List<String> names) {
List<String> m = new ArrayList<String>(names.size());
if (names.isEmpty()) {
return m;
}
if (scriptName == null) {
for (String name : names) {
m.add(NetworkTopology.DEFAULT_RACK);
}
return m;
}
String output = runResolveCommand(names, scriptName);
if (output != null) {
StringTokenizer allSwitchInfo = new StringTokenizer(output);
while (allSwitchInfo.hasMoreTokens()) {
String switchInfo = allSwitchInfo.nextToken();
m.add(switchInfo);
}
if (m.size() != names.size()) {
// invalid number of entries returned by the script
LOG.error("Script " + scriptName + " returned "
+ Integer.toString(m.size()) + " values when "
+ Integer.toString(names.size()) + " were expected.");
return null;
}
} else {
// an error occurred. return null to signify this.
// (exn was already logged in runResolveCommand)
return null;
}
return m;
}
/**
* Build and execute the resolution command. The command is
* executed in the directory specified by the system property
* "user.dir" if set; otherwise the current working directory is used
     * @param args a list of arguments
     * @param commandScriptName the script to execute
     * @return the output of the command, or null if the configured argument
     * count is out of range or the command fails
*/
protected String runResolveCommand(List<String> args,
String commandScriptName) {
int loopCount = 0;
if (args.size() == 0) {
return null;
}
StringBuilder allOutput = new StringBuilder();
int numProcessed = 0;
if (maxArgs < MIN_ALLOWABLE_ARGS) {
LOG.warn("Invalid value " + Integer.toString(maxArgs)
+ " for " + SCRIPT_ARG_COUNT_KEY + "; must be >= "
+ Integer.toString(MIN_ALLOWABLE_ARGS));
return null;
}
while (numProcessed != args.size()) {
int start = maxArgs * loopCount;
List<String> cmdList = new ArrayList<String>();
cmdList.add(commandScriptName);
for (numProcessed = start; numProcessed < (start + maxArgs) &&
numProcessed < args.size(); numProcessed++) {
cmdList.add(args.get(numProcessed));
}
File dir = null;
String userDir;
if ((userDir = System.getProperty("user.dir")) != null) {
dir = new File(userDir);
}
ShellCommandExecutor s = new ShellCommandExecutor(
cmdList.toArray(new String[cmdList.size()]), dir);
try {
s.execute();
allOutput.append(s.getOutput()).append(" ");
} catch (Exception e) {
LOG.warn("Exception running " + s, e);
return null;
}
loopCount++;
}
return allOutput.toString();
}
/**
* Declare that the mapper is single-switched if a script was not named
* in the configuration.
* @return true iff there is no script
*/
@Override
public boolean isSingleSwitch() {
return scriptName == null;
}
@Override
public String toString() {
return scriptName != null ? ("script " + scriptName) : NO_SCRIPT;
}
@Override
public void reloadCachedMappings() {
// Nothing to do here, since RawScriptBasedMapping has no cache, and
// does not inherit from CachedDNSToSwitchMapping
}
@Override
public void reloadCachedMappings(List<String> names) {
// Nothing to do here, since RawScriptBasedMapping has no cache, and
// does not inherit from CachedDNSToSwitchMapping
}
}
}
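/*
 * Usage sketch (the script path below is an assumption, not part of this
 * file): configure a topology script, then resolve hostnames to rack paths
 * such as "/dc1/rack1". Results are cached by the CachedDNSToSwitchMapping
 * superclass, so repeated calls for the same hosts do not re-run the script.
 */
class ScriptBasedMappingUsageSketch {
  static List<String> resolveRacks(List<String> hosts) {
    Configuration conf = new Configuration();
    // SCRIPT_FILENAME_KEY names the configured topology script
    conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "/etc/hadoop/topology.sh");
    ScriptBasedMapping mapping = new ScriptBasedMapping(conf);
    return mapping.resolve(hosts);
  }
}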
| 9,025 | 30.124138 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import java.io.InputStream;
import java.net.Socket;
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.SelectableChannel;
import java.nio.channels.SelectionKey;
/**
* This implements an input stream that can have a timeout while reading.
 * This sets the non-blocking flag on the socket channel.
 * So after creating this object, read() on
* {@link Socket#getInputStream()} and write() on
* {@link Socket#getOutputStream()} for the associated socket will throw
* IllegalBlockingModeException.
* Please use {@link SocketOutputStream} for writing.
*/
@InterfaceAudience.LimitedPrivate("HDFS")
public class SocketInputStream extends InputStream
implements ReadableByteChannel {
private Reader reader;
private static class Reader extends SocketIOWithTimeout {
ReadableByteChannel channel;
Reader(ReadableByteChannel channel, long timeout) throws IOException {
super((SelectableChannel)channel, timeout);
this.channel = channel;
}
@Override
int performIO(ByteBuffer buf) throws IOException {
return channel.read(buf);
}
}
/**
* Create a new input stream with the given timeout. If the timeout
* is zero, it will be treated as infinite timeout. The socket's
* channel will be configured to be non-blocking.
*
* @param channel
* Channel for reading, should also be a {@link SelectableChannel}.
* The channel will be configured to be non-blocking.
* @param timeout timeout in milliseconds. must not be negative.
* @throws IOException
*/
public SocketInputStream(ReadableByteChannel channel, long timeout)
throws IOException {
SocketIOWithTimeout.checkChannelValidity(channel);
reader = new Reader(channel, timeout);
}
/**
* Same as SocketInputStream(socket.getChannel(), timeout): <br><br>
*
* Create a new input stream with the given timeout. If the timeout
* is zero, it will be treated as infinite timeout. The socket's
* channel will be configured to be non-blocking.
*
* @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
*
* @param socket should have a channel associated with it.
 * @param timeout timeout in milliseconds. must not be negative.
* @throws IOException
*/
public SocketInputStream(Socket socket, long timeout)
throws IOException {
this(socket.getChannel(), timeout);
}
/**
* Same as SocketInputStream(socket.getChannel(), socket.getSoTimeout())
* :<br><br>
*
* Create a new input stream with the given timeout. If the timeout
* is zero, it will be treated as infinite timeout. The socket's
* channel will be configured to be non-blocking.
* @see SocketInputStream#SocketInputStream(ReadableByteChannel, long)
*
* @param socket should have a channel associated with it.
* @throws IOException
*/
public SocketInputStream(Socket socket) throws IOException {
this(socket.getChannel(), socket.getSoTimeout());
}
@Override
public int read() throws IOException {
/* Allocation can be removed if required.
* probably no need to optimize or encourage single byte read.
*/
byte[] buf = new byte[1];
int ret = read(buf, 0, 1);
if (ret > 0) {
return (int)(buf[0] & 0xff);
}
if (ret != -1) {
// unexpected
throw new IOException("Could not read from stream");
}
return ret;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
return read(ByteBuffer.wrap(b, off, len));
}
@Override
public synchronized void close() throws IOException {
/* close the channel since Socket.getInputStream().close()
* closes the socket.
*/
reader.channel.close();
reader.close();
}
/**
   * Returns the underlying channel used by this input stream.
   * This is useful in certain cases, like using the channel for
* {@link FileChannel#transferFrom(ReadableByteChannel, long, long)}.
*/
public ReadableByteChannel getChannel() {
return reader.channel;
}
//ReadableByteChannel interface
@Override
public boolean isOpen() {
return reader.isOpen();
}
@Override
public int read(ByteBuffer dst) throws IOException {
return reader.doIO(dst, SelectionKey.OP_READ);
}
/**
* waits for the underlying channel to be ready for reading.
* The timeout specified for this stream applies to this wait.
*
* @throws SocketTimeoutException
* if select on the channel times out.
* @throws IOException
* if any other I/O error occurs.
*/
public void waitForReadable() throws IOException {
reader.waitForIO(SelectionKey.OP_READ);
}
public void setTimeout(long timeoutMs) {
reader.setTimeout(timeoutMs);
}
}
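/*
 * Usage sketch: read with a bounded wait. The socket must have an
 * associated channel (e.g. one created via SocketChannel.open()); a
 * stalled peer surfaces as SocketTimeoutException instead of blocking
 * forever.
 */
class SocketInputStreamUsageSketch {
  static int readWithTimeout(Socket socket, byte[] buf) throws IOException {
    InputStream in = new SocketInputStream(socket, 5000); // 5 second timeout
    return in.read(buf, 0, buf.length);
  }
}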
| 5,927 | 31.751381 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.io.EOFException;
import java.io.IOException;
import java.io.OutputStream;
import java.net.Socket;
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.SelectableChannel;
import java.nio.channels.SelectionKey;
import java.nio.channels.WritableByteChannel;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.metrics2.lib.MutableRate;
/**
* This implements an output stream that can have a timeout while writing.
 * This sets the non-blocking flag on the socket channel.
 * So after creating this object, read() on
 * {@link Socket#getInputStream()} and write() on
 * {@link Socket#getOutputStream()} on the associated socket will throw
 * IllegalBlockingModeException.
* Please use {@link SocketInputStream} for reading.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class SocketOutputStream extends OutputStream
implements WritableByteChannel {
private Writer writer;
private static class Writer extends SocketIOWithTimeout {
WritableByteChannel channel;
Writer(WritableByteChannel channel, long timeout) throws IOException {
super((SelectableChannel)channel, timeout);
this.channel = channel;
}
@Override
int performIO(ByteBuffer buf) throws IOException {
return channel.write(buf);
}
}
/**
 * Create a new output stream with the given timeout. If the timeout
* is zero, it will be treated as infinite timeout. The socket's
* channel will be configured to be non-blocking.
*
* @param channel
* Channel for writing, should also be a {@link SelectableChannel}.
* The channel will be configured to be non-blocking.
* @param timeout timeout in milliseconds. must not be negative.
* @throws IOException
*/
public SocketOutputStream(WritableByteChannel channel, long timeout)
throws IOException {
SocketIOWithTimeout.checkChannelValidity(channel);
writer = new Writer(channel, timeout);
}
/**
* Same as SocketOutputStream(socket.getChannel(), timeout):<br><br>
*
 * Create a new output stream with the given timeout. If the timeout
* is zero, it will be treated as infinite timeout. The socket's
* channel will be configured to be non-blocking.
*
* @see SocketOutputStream#SocketOutputStream(WritableByteChannel, long)
*
* @param socket should have a channel associated with it.
 * @param timeout timeout in milliseconds. must not be negative.
* @throws IOException
*/
public SocketOutputStream(Socket socket, long timeout)
throws IOException {
this(socket.getChannel(), timeout);
}
@Override
public void write(int b) throws IOException {
/* If we need to, we can optimize this allocation.
* probably no need to optimize or encourage single byte writes.
*/
byte[] buf = new byte[1];
buf[0] = (byte)b;
write(buf, 0, 1);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
ByteBuffer buf = ByteBuffer.wrap(b, off, len);
while (buf.hasRemaining()) {
try {
if (write(buf) < 0) {
throw new IOException("The stream is closed");
}
} catch (IOException e) {
        /* Unlike read, write cannot inform the user of partial writes.
         * So we close the stream if there was a partial write.
*/
if (buf.capacity() > buf.remaining()) {
writer.close();
}
throw e;
}
}
}
@Override
public synchronized void close() throws IOException {
/* close the channel since Socket.getOuputStream().close()
* closes the socket.
*/
writer.channel.close();
writer.close();
}
/**
   * Returns the underlying channel used by this stream.
   * This is useful in certain cases, like using the channel for
* {@link FileChannel#transferTo(long, long, WritableByteChannel)}
*/
public WritableByteChannel getChannel() {
return writer.channel;
}
  //WritableByteChannel interface
@Override
public boolean isOpen() {
return writer.isOpen();
}
@Override
public int write(ByteBuffer src) throws IOException {
return writer.doIO(src, SelectionKey.OP_WRITE);
}
/**
* waits for the underlying channel to be ready for writing.
* The timeout specified for this stream applies to this wait.
*
* @throws SocketTimeoutException
* if select on the channel times out.
* @throws IOException
* if any other I/O error occurs.
*/
public void waitForWritable() throws IOException {
writer.waitForIO(SelectionKey.OP_WRITE);
}
/**
* Transfers data from FileChannel using
* {@link FileChannel#transferTo(long, long, WritableByteChannel)}.
* Updates <code>waitForWritableTime</code> and <code>transferToTime</code>
* with the time spent blocked on the network and the time spent transferring
* data from disk to network respectively.
*
* Similar to readFully(), this waits till requested amount of
   * data is transferred.
*
* @param fileCh FileChannel to transfer data from.
* @param position position within the channel where the transfer begins
* @param count number of bytes to transfer.
* @param waitForWritableTime nanoseconds spent waiting for the socket
* to become writable
   * @param transferToTime nanoseconds spent transferring data
*
* @throws EOFException
* If end of input file is reached before requested number of
   *         bytes are transferred.
*
* @throws SocketTimeoutException
* If this channel blocks transfer longer than timeout for
* this stream.
*
* @throws IOException Includes any exception thrown by
* {@link FileChannel#transferTo(long, long, WritableByteChannel)}.
*/
public void transferToFully(FileChannel fileCh, long position, int count,
LongWritable waitForWritableTime,
LongWritable transferToTime) throws IOException {
long waitTime = 0;
long transferTime = 0;
while (count > 0) {
/*
* Ideally we should wait after transferTo returns 0. But because of
* a bug in JRE on Linux (http://bugs.sun.com/view_bug.do?bug_id=5103988),
* which throws an exception instead of returning 0, we wait for the
* channel to be writable before writing to it. If you ever see
* IOException with message "Resource temporarily unavailable"
* thrown here, please let us know.
*
* Once we move to JAVA SE 7, wait should be moved to correct place.
*/
long start = System.nanoTime();
waitForWritable();
long wait = System.nanoTime();
      int nTransferred = (int) fileCh.transferTo(position, count, getChannel());
      if (nTransferred == 0) {
        //check if end of file is reached.
        if (position >= fileCh.size()) {
          throw new EOFException("EOF Reached. file size is " + fileCh.size() +
                                 " and " + count + " more bytes left to be " +
                                 "transferred.");
        }
        //otherwise assume the socket is full.
        //waitForWritable(); // see comment above.
      } else if (nTransferred < 0) {
        throw new IOException("Unexpected return of " + nTransferred +
                              " from transferTo()");
      } else {
        position += nTransferred;
        count -= nTransferred;
      }
long transfer = System.nanoTime();
waitTime += wait - start;
transferTime += transfer - wait;
}
if (waitForWritableTime != null) {
waitForWritableTime.set(waitTime);
}
if (transferToTime != null) {
transferToTime.set(transferTime);
}
}
/**
* Call
   * {@link #transferToFully(FileChannel, long, int, LongWritable, LongWritable)}
* with null <code>waitForWritableTime</code> and <code>transferToTime</code>
*/
public void transferToFully(FileChannel fileCh, long position, int count)
throws IOException {
transferToFully(fileCh, position, count, null, null);
}
public void setTimeout(int timeoutMs) {
writer.setTimeout(timeoutMs);
}
}
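/*
 * Usage sketch: stream an entire file to a socket. transferToFully()
 * drives FileChannel.transferTo() and waits for the channel to become
 * writable between chunks, honoring the stream's timeout. (Assumes the
 * file's byte count fits in an int; larger files would need a loop.)
 */
class SocketOutputStreamUsageSketch {
  static void sendFile(Socket socket, FileChannel fileCh) throws IOException {
    SocketOutputStream out = new SocketOutputStream(socket, 10000); // 10s timeout
    out.transferToFully(fileCh, 0, (int) fileCh.size());
  }
}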
| 9,378 | 33.996269 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * A cached implementation of DNSToSwitchMapping that takes a
 * raw DNSToSwitchMapping and stores the resolved network location in
 * a cache. Subsequent calls for an already-resolved host will get its
 * location from the cache.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class CachedDNSToSwitchMapping extends AbstractDNSToSwitchMapping {
private Map<String, String> cache = new ConcurrentHashMap<String, String>();
/**
* The uncached mapping
*/
protected final DNSToSwitchMapping rawMapping;
/**
* cache a raw DNS mapping
* @param rawMapping the raw mapping to cache
*/
public CachedDNSToSwitchMapping(DNSToSwitchMapping rawMapping) {
this.rawMapping = rawMapping;
}
/**
* @param names a list of hostnames to probe for being cached
* @return the hosts from 'names' that have not been cached previously
*/
private List<String> getUncachedHosts(List<String> names) {
// find out all names without cached resolved location
List<String> unCachedHosts = new ArrayList<String>(names.size());
for (String name : names) {
if (cache.get(name) == null) {
unCachedHosts.add(name);
}
}
return unCachedHosts;
}
/**
* Caches the resolved host:rack mappings. The two list
* parameters must be of equal size.
*
* @param uncachedHosts a list of hosts that were uncached
* @param resolvedHosts a list of resolved host entries where the element
* at index(i) is the resolved value for the entry in uncachedHosts[i]
*/
private void cacheResolvedHosts(List<String> uncachedHosts,
List<String> resolvedHosts) {
// Cache the result
if (resolvedHosts != null) {
for (int i=0; i<uncachedHosts.size(); i++) {
cache.put(uncachedHosts.get(i), resolvedHosts.get(i));
}
}
}
/**
   * @param names a list of hostnames to look up (can be empty)
* @return the cached resolution of the list of hostnames/addresses.
* or null if any of the names are not currently in the cache
*/
private List<String> getCachedHosts(List<String> names) {
List<String> result = new ArrayList<String>(names.size());
// Construct the result
for (String name : names) {
String networkLocation = cache.get(name);
if (networkLocation != null) {
result.add(networkLocation);
} else {
return null;
}
}
return result;
}
@Override
public List<String> resolve(List<String> names) {
// normalize all input names to be in the form of IP addresses
names = NetUtils.normalizeHostNames(names);
List <String> result = new ArrayList<String>(names.size());
if (names.isEmpty()) {
return result;
}
List<String> uncachedHosts = getUncachedHosts(names);
// Resolve the uncached hosts
List<String> resolvedHosts = rawMapping.resolve(uncachedHosts);
//cache them
cacheResolvedHosts(uncachedHosts, resolvedHosts);
//now look up the entire list in the cache
return getCachedHosts(names);
}
/**
* Get the (host x switch) map.
* @return a copy of the cached map of hosts to rack
*/
@Override
public Map<String, String> getSwitchMap() {
Map<String, String > switchMap = new HashMap<String, String>(cache);
return switchMap;
}
@Override
public String toString() {
return "cached switch mapping relaying to " + rawMapping;
}
/**
* Delegate the switch topology query to the raw mapping, via
* {@link AbstractDNSToSwitchMapping#isMappingSingleSwitch(DNSToSwitchMapping)}
* @return true iff the raw mapper is considered single-switch.
*/
@Override
public boolean isSingleSwitch() {
return isMappingSingleSwitch(rawMapping);
}
@Override
public void reloadCachedMappings() {
cache.clear();
}
@Override
public void reloadCachedMappings(List<String> names) {
for (String name : names) {
cache.remove(name);
}
}
}
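/*
 * Usage sketch: wrap a raw DNSToSwitchMapping so repeated resolutions hit
 * the cache instead of the raw mapper. The anonymous raw mapping below is
 * a stand-in that places every host in the default rack.
 */
class CachedDNSToSwitchMappingUsageSketch {
  static CachedDNSToSwitchMapping newCachedDefaultRackMapping() {
    return new CachedDNSToSwitchMapping(new DNSToSwitchMapping() {
      @Override
      public List<String> resolve(List<String> names) {
        List<String> racks = new ArrayList<String>(names.size());
        for (int i = 0; i < names.size(); i++) {
          racks.add(NetworkTopology.DEFAULT_RACK);
        }
        return racks;
      }
      @Override
      public void reloadCachedMappings() {} // stateless, nothing to clear
      @Override
      public void reloadCachedMappings(List<String> names) {}
    });
  }
}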
| 5,069 | 29.727273 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NodeBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A base class that implements the {@link Node} interface
*
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class NodeBase implements Node {
/** Path separator {@value} */
public final static char PATH_SEPARATOR = '/';
/** Path separator as a string {@value} */
public final static String PATH_SEPARATOR_STR = "/";
/** string representation of root {@value} */
public final static String ROOT = "";
protected String name; //host:port#
protected String location; //string representation of this node's location
protected int level; //which level of the tree the node resides
protected Node parent; //its parent
/** Default constructor */
public NodeBase() {
}
/** Construct a node from its path
* @param path
   *   a concatenation of this node's location, the path separator, and its name
*/
public NodeBase(String path) {
path = normalize(path);
int index = path.lastIndexOf(PATH_SEPARATOR);
if (index== -1) {
set(ROOT, path);
} else {
set(path.substring(index+1), path.substring(0, index));
}
}
/** Construct a node from its name and its location
* @param name this node's name (can be null, must not contain {@link #PATH_SEPARATOR})
* @param location this node's location
*/
public NodeBase(String name, String location) {
set(name, normalize(location));
}
/** Construct a node from its name and its location
* @param name this node's name (can be null, must not contain {@link #PATH_SEPARATOR})
* @param location this node's location
* @param parent this node's parent node
* @param level this node's level in the tree
*/
public NodeBase(String name, String location, Node parent, int level) {
set(name, normalize(location));
this.parent = parent;
this.level = level;
}
/**
* set this node's name and location
   * @param name the (nullable) name, which cannot contain the {@link #PATH_SEPARATOR}
* @param location the location
*/
private void set(String name, String location) {
if (name != null && name.contains(PATH_SEPARATOR_STR))
throw new IllegalArgumentException(
"Network location name contains /: "+name);
this.name = (name==null)?"":name;
this.location = location;
}
/** @return this node's name */
@Override
public String getName() { return name; }
/** @return this node's network location */
@Override
public String getNetworkLocation() { return location; }
/** Set this node's network location
* @param location the location
*/
@Override
public void setNetworkLocation(String location) { this.location = location; }
/**
* Get the path of a node
* @param node a non-null node
* @return the path of a node
*/
public static String getPath(Node node) {
return node.getNetworkLocation() + PATH_SEPARATOR_STR + node.getName();
}
/** @return this node's path as its string representation */
@Override
public String toString() {
return getPath(this);
}
/** Normalize a path by stripping off any trailing {@link #PATH_SEPARATOR}
* @param path path to normalize.
* @return the normalised path
   * If <i>path</i> is null or empty, {@link #ROOT} is returned
* @throws IllegalArgumentException if the first character of a non empty path
* is not {@link #PATH_SEPARATOR}
*/
public static String normalize(String path) {
if (path == null || path.length() == 0) return ROOT;
if (path.charAt(0) != PATH_SEPARATOR) {
throw new IllegalArgumentException(
"Network Location path does not start with "
+PATH_SEPARATOR_STR+ ": "+path);
}
int len = path.length();
if (path.charAt(len-1) == PATH_SEPARATOR) {
return path.substring(0, len-1);
}
return path;
}
/** @return this node's parent */
@Override
public Node getParent() { return parent; }
/** Set this node's parent
* @param parent the parent
*/
@Override
public void setParent(Node parent) {
this.parent = parent;
}
/** @return this node's level in the tree.
* E.g. the root of a tree returns 0 and its children return 1
*/
@Override
public int getLevel() { return level; }
/** Set this node's level in the tree
* @param level the level
*/
@Override
public void setLevel(int level) {
this.level = level;
}
public static int locationToDepth(String location) {
String normalizedLocation = normalize(location);
int length = normalizedLocation.length();
int depth = 0;
for (int i = 0; i < length; i++) {
if (normalizedLocation.charAt(i) == PATH_SEPARATOR) {
depth++;
}
}
return depth;
}
}
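/*
 * Usage sketch: a node's path splits at the last separator into its
 * location and name; depth counts the separators in the location.
 * The topology path below is hypothetical.
 */
class NodeBaseUsageSketch {
  static void demo() {
    NodeBase node = new NodeBase("/dc1/rack1/host1");
    // node.getName()                        -> "host1"
    // node.getNetworkLocation()             -> "/dc1/rack1"
    // NodeBase.locationToDepth("/dc1/rack1") -> 2
    System.out.println(NodeBase.getPath(node));
  }
}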
| 5,801 | 30.704918 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.SocketAddress;
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.nio.channels.SelectableChannel;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;
import java.nio.channels.spi.SelectorProvider;
import java.util.Iterator;
import java.util.LinkedList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
/**
 * This supports input and output streams for socket channels.
* These streams can have a timeout.
*/
abstract class SocketIOWithTimeout {
// This is intentionally package private.
static final Log LOG = LogFactory.getLog(SocketIOWithTimeout.class);
private SelectableChannel channel;
private long timeout;
private boolean closed = false;
private static SelectorPool selector = new SelectorPool();
  /* A timeout value of 0 implies waiting forever.
   * We should have a timeout value that implies zero wait, i.e.
* read or write returns immediately.
*
* This will set channel to non-blocking.
*/
SocketIOWithTimeout(SelectableChannel channel, long timeout)
throws IOException {
checkChannelValidity(channel);
this.channel = channel;
this.timeout = timeout;
// Set non-blocking
channel.configureBlocking(false);
}
void close() {
closed = true;
}
boolean isOpen() {
return !closed && channel.isOpen();
}
SelectableChannel getChannel() {
return channel;
}
/**
* Utility function to check if channel is ok.
* Mainly to throw IOException instead of runtime exception
* in case of mismatch. This mismatch can occur for many runtime
* reasons.
*/
static void checkChannelValidity(Object channel) throws IOException {
if (channel == null) {
/* Most common reason is that original socket does not have a channel.
* So making this an IOException rather than a RuntimeException.
*/
throw new IOException("Channel is null. Check " +
"how the channel or socket is created.");
}
if (!(channel instanceof SelectableChannel)) {
throw new IOException("Channel should be a SelectableChannel");
}
}
/**
* Performs actual IO operations. This is not expected to block.
*
* @param buf
* @return number of bytes (or some equivalent). 0 implies underlying
* channel is drained completely. We will wait if more IO is
* required.
* @throws IOException
*/
abstract int performIO(ByteBuffer buf) throws IOException;
/**
* Performs one IO and returns number of bytes read or written.
* It waits up to the specified timeout. If the channel is
   * not ready before the timeout, SocketTimeoutException is thrown.
*
* @param buf buffer for IO
* @param ops Selection Ops used for waiting. Suggested values:
* SelectionKey.OP_READ while reading and SelectionKey.OP_WRITE while
* writing.
*
* @return number of bytes read or written. negative implies end of stream.
* @throws IOException
*/
int doIO(ByteBuffer buf, int ops) throws IOException {
    /* For now only one thread is allowed. If users want to read or write
     * from multiple threads, multiple streams could be created. In that
     * case multiple threads work as well as the underlying channel supports it.
*/
if (!buf.hasRemaining()) {
throw new IllegalArgumentException("Buffer has no data left.");
//or should we just return 0?
}
while (buf.hasRemaining()) {
if (closed) {
return -1;
}
try {
int n = performIO(buf);
if (n != 0) {
// successful io or an error.
return n;
}
} catch (IOException e) {
if (!channel.isOpen()) {
closed = true;
}
throw e;
}
//now wait for socket to be ready.
int count = 0;
try {
count = selector.select(channel, ops, timeout);
} catch (IOException e) { //unexpected IOException.
closed = true;
throw e;
}
if (count == 0) {
throw new SocketTimeoutException(timeoutExceptionString(channel,
timeout, ops));
}
// otherwise the socket should be ready for io.
}
return 0; // does not reach here.
}
/**
* The contract is similar to {@link SocketChannel#connect(SocketAddress)}
* with a timeout.
*
* @see SocketChannel#connect(SocketAddress)
*
* @param channel - this should be a {@link SelectableChannel}
* @param endpoint
* @throws IOException
*/
static void connect(SocketChannel channel,
SocketAddress endpoint, int timeout) throws IOException {
boolean blockingOn = channel.isBlocking();
if (blockingOn) {
channel.configureBlocking(false);
}
try {
if (channel.connect(endpoint)) {
return;
}
long timeoutLeft = timeout;
long endTime = (timeout > 0) ? (Time.now() + timeout): 0;
while (true) {
// we might have to call finishConnect() more than once
// for some channels (with user level protocols)
int ret = selector.select((SelectableChannel)channel,
SelectionKey.OP_CONNECT, timeoutLeft);
if (ret > 0 && channel.finishConnect()) {
return;
}
if (ret == 0 ||
(timeout > 0 &&
(timeoutLeft = (endTime - Time.now())) <= 0)) {
throw new SocketTimeoutException(
timeoutExceptionString(channel, timeout,
SelectionKey.OP_CONNECT));
}
}
} catch (IOException e) {
// javadoc for SocketChannel.connect() says channel should be closed.
try {
channel.close();
} catch (IOException ignored) {}
throw e;
} finally {
if (blockingOn && channel.isOpen()) {
channel.configureBlocking(true);
}
}
}
/**
* This is similar to {@link #doIO(ByteBuffer, int)} except that it
* does not perform any I/O. It just waits for the channel to be ready
* for I/O as specified in ops.
*
* @param ops Selection Ops used for waiting
*
* @throws SocketTimeoutException
* if select on the channel times out.
* @throws IOException
* if any other I/O error occurs.
*/
void waitForIO(int ops) throws IOException {
if (selector.select(channel, ops, timeout) == 0) {
throw new SocketTimeoutException(timeoutExceptionString(channel, timeout,
ops));
}
}
public void setTimeout(long timeoutMs) {
this.timeout = timeoutMs;
}
private static String timeoutExceptionString(SelectableChannel channel,
long timeout, int ops) {
String waitingFor;
switch(ops) {
case SelectionKey.OP_READ :
waitingFor = "read"; break;
case SelectionKey.OP_WRITE :
waitingFor = "write"; break;
case SelectionKey.OP_CONNECT :
waitingFor = "connect"; break;
default :
waitingFor = "" + ops;
}
return timeout + " millis timeout while " +
"waiting for channel to be ready for " +
waitingFor + ". ch : " + channel;
}
/**
* This maintains a pool of selectors. These selectors are closed
* once they are idle (unused) for a few seconds.
*/
private static class SelectorPool {
private static class SelectorInfo {
Selector selector;
long lastActivityTime;
LinkedList<SelectorInfo> queue;
void close() {
if (selector != null) {
try {
selector.close();
} catch (IOException e) {
LOG.warn("Unexpected exception while closing selector : ", e);
}
}
}
}
private static class ProviderInfo {
SelectorProvider provider;
LinkedList<SelectorInfo> queue; // lifo
ProviderInfo next;
}
private static final long IDLE_TIMEOUT = 10 * 1000; // 10 seconds.
private ProviderInfo providerList = null;
/**
* Waits on the channel with the given timeout using one of the
* cached selectors. It also removes any cached selectors that are
* idle for a few seconds.
*
* @param channel
* @param ops
* @param timeout
* @return
* @throws IOException
*/
int select(SelectableChannel channel, int ops, long timeout)
throws IOException {
SelectorInfo info = get(channel);
SelectionKey key = null;
int ret = 0;
try {
while (true) {
long start = (timeout == 0) ? 0 : Time.now();
key = channel.register(info.selector, ops);
ret = info.selector.select(timeout);
if (ret != 0) {
return ret;
}
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedIOException("Interrupted while waiting for "
+ "IO on channel " + channel + ". " + timeout
+ " millis timeout left.");
}
/* Sometimes select() returns 0 much before timeout for
* unknown reasons. So select again if required.
*/
if (timeout > 0) {
timeout -= Time.now() - start;
if (timeout <= 0) {
return 0;
}
}
}
} finally {
if (key != null) {
key.cancel();
}
//clear the canceled key.
try {
info.selector.selectNow();
} catch (IOException e) {
LOG.info("Unexpected Exception while clearing selector : ", e);
// don't put the selector back.
info.close();
return ret;
}
release(info);
}
}
/**
* Takes one selector from end of LRU list of free selectors.
     * If there are no selectors available, it creates a new selector.
* Also invokes trimIdleSelectors().
*
* @param channel
* @return
* @throws IOException
*/
private synchronized SelectorInfo get(SelectableChannel channel)
throws IOException {
SelectorInfo selInfo = null;
SelectorProvider provider = channel.provider();
// pick the list : rarely there is more than one provider in use.
ProviderInfo pList = providerList;
while (pList != null && pList.provider != provider) {
pList = pList.next;
}
if (pList == null) {
//LOG.info("Creating new ProviderInfo : " + provider.toString());
pList = new ProviderInfo();
pList.provider = provider;
pList.queue = new LinkedList<SelectorInfo>();
pList.next = providerList;
providerList = pList;
}
LinkedList<SelectorInfo> queue = pList.queue;
if (queue.isEmpty()) {
Selector selector = provider.openSelector();
selInfo = new SelectorInfo();
selInfo.selector = selector;
selInfo.queue = queue;
} else {
selInfo = queue.removeLast();
}
trimIdleSelectors(Time.now());
return selInfo;
}
/**
     * Puts the selector back at the end of the LRU list of free selectors.
* Also invokes trimIdleSelectors().
*
* @param info
*/
private synchronized void release(SelectorInfo info) {
long now = Time.now();
trimIdleSelectors(now);
info.lastActivityTime = now;
info.queue.addLast(info);
}
/**
* Closes selectors that are idle for IDLE_TIMEOUT (10 sec). It does not
     * traverse the whole list, just the ones that have crossed
* the timeout.
*/
private void trimIdleSelectors(long now) {
long cutoff = now - IDLE_TIMEOUT;
for(ProviderInfo pList=providerList; pList != null; pList=pList.next) {
if (pList.queue.isEmpty()) {
continue;
}
for(Iterator<SelectorInfo> it = pList.queue.iterator(); it.hasNext();) {
SelectorInfo info = it.next();
if (info.lastActivityTime > cutoff) {
break;
}
it.remove();
info.close();
}
}
}
}
}
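/*
 * Usage sketch (same-package callers only, since this class is package
 * private): connect a channel with a bounded wait, then wrap it with
 * SocketInputStream/SocketOutputStream for timed reads and writes.
 */
class SocketIOWithTimeoutUsageSketch {
  static SocketChannel connectWithTimeout(SocketAddress endpoint)
      throws IOException {
    SocketChannel channel = SocketChannel.open();
    // 10 second connect timeout; the channel's original blocking mode
    // is restored once the connection succeeds.
    SocketIOWithTimeout.connect(channel, endpoint, 10000);
    return channel;
  }
}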
| 13,742 | 29.00655 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.Enumeration;
import java.util.LinkedHashSet;
import java.util.Vector;
import javax.naming.NamingException;
import javax.naming.directory.Attributes;
import javax.naming.directory.DirContext;
import javax.naming.directory.InitialDirContext;
/**
*
* A class that provides direct and reverse lookup functionalities, allowing
* the querying of specific network interfaces or nameservers.
*
*
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class DNS {
private static final Log LOG = LogFactory.getLog(DNS.class);
/**
   * The cached hostname, resolved once when the class is initialized.
*/
private static final String cachedHostname = resolveLocalHostname();
private static final String cachedHostAddress = resolveLocalHostIPAddress();
private static final String LOCALHOST = "localhost";
/**
* Returns the hostname associated with the specified IP address by the
* provided nameserver.
*
* Loopback addresses
* @param hostIp The address to reverse lookup
* @param ns The host name of a reachable DNS server
* @return The host name associated with the provided IP
* @throws NamingException If a NamingException is encountered
*/
public static String reverseDns(InetAddress hostIp, String ns)
throws NamingException {
//
// Builds the reverse IP lookup form
// This is formed by reversing the IP numbers and appending in-addr.arpa
//
String[] parts = hostIp.getHostAddress().split("\\.");
String reverseIP = parts[3] + "." + parts[2] + "." + parts[1] + "."
+ parts[0] + ".in-addr.arpa";
DirContext ictx = new InitialDirContext();
Attributes attribute;
try {
attribute = ictx.getAttributes("dns://" // Use "dns:///" if the default
+ ((ns == null) ? "" : ns) +
// nameserver is to be used
"/" + reverseIP, new String[] { "PTR" });
} finally {
ictx.close();
}
String hostname = attribute.get("PTR").get().toString();
int hostnameLength = hostname.length();
if (hostname.charAt(hostnameLength - 1) == '.') {
hostname = hostname.substring(0, hostnameLength - 1);
}
return hostname;
}
/**
* @return NetworkInterface for the given subinterface name (eg eth0:0)
* or null if no interface with the given name can be found
*/
private static NetworkInterface getSubinterface(String strInterface)
throws SocketException {
Enumeration<NetworkInterface> nifs =
NetworkInterface.getNetworkInterfaces();
while (nifs.hasMoreElements()) {
Enumeration<NetworkInterface> subNifs =
nifs.nextElement().getSubInterfaces();
while (subNifs.hasMoreElements()) {
NetworkInterface nif = subNifs.nextElement();
if (nif.getName().equals(strInterface)) {
return nif;
}
}
}
return null;
}
/**
* @param nif network interface to get addresses for
* @return set containing addresses for each subinterface of nif,
* see below for the rationale for using an ordered set
*/
private static LinkedHashSet<InetAddress> getSubinterfaceInetAddrs(
NetworkInterface nif) {
LinkedHashSet<InetAddress> addrs = new LinkedHashSet<InetAddress>();
Enumeration<NetworkInterface> subNifs = nif.getSubInterfaces();
while (subNifs.hasMoreElements()) {
NetworkInterface subNif = subNifs.nextElement();
addrs.addAll(Collections.list(subNif.getInetAddresses()));
}
return addrs;
}
/**
   * Like {@link DNS#getIPs(String, boolean)}, but returns all
* IPs associated with the given interface and its subinterfaces.
*/
public static String[] getIPs(String strInterface)
throws UnknownHostException {
return getIPs(strInterface, true);
}
/**
* Returns all the IPs associated with the provided interface, if any, in
* textual form.
*
* @param strInterface
* The name of the network interface or sub-interface to query
* (eg eth0 or eth0:0) or the string "default"
* @param returnSubinterfaces
* Whether to return IPs associated with subinterfaces of
* the given interface
* @return A string vector of all the IPs associated with the provided
* interface. The local host IP is returned if the interface
* name "default" is specified or there is an I/O error looking
* for the given interface.
* @throws UnknownHostException
* If the given interface is invalid
*
*/
public static String[] getIPs(String strInterface,
boolean returnSubinterfaces) throws UnknownHostException {
if ("default".equals(strInterface)) {
return new String[] { cachedHostAddress };
}
NetworkInterface netIf;
try {
netIf = NetworkInterface.getByName(strInterface);
if (netIf == null) {
netIf = getSubinterface(strInterface);
}
} catch (SocketException e) {
LOG.warn("I/O error finding interface " + strInterface +
": " + e.getMessage());
return new String[] { cachedHostAddress };
}
if (netIf == null) {
throw new UnknownHostException("No such interface " + strInterface);
}
// NB: Using a LinkedHashSet to preserve the order for callers
// that depend on a particular element being 1st in the array.
// For example, getDefaultIP always returns the first element.
LinkedHashSet<InetAddress> allAddrs = new LinkedHashSet<InetAddress>();
allAddrs.addAll(Collections.list(netIf.getInetAddresses()));
if (!returnSubinterfaces) {
allAddrs.removeAll(getSubinterfaceInetAddrs(netIf));
}
String ips[] = new String[allAddrs.size()];
int i = 0;
for (InetAddress addr : allAddrs) {
ips[i++] = addr.getHostAddress();
}
return ips;
}
/**
* Returns the first available IP address associated with the provided
* network interface or the local host IP if "default" is given.
*
* @param strInterface
* The name of the network interface or subinterface to query
* (e.g. eth0 or eth0:0) or the string "default"
* @return The IP address in text form, the local host IP is returned
* if the interface name "default" is specified
* @throws UnknownHostException
* If the given interface is invalid
*/
public static String getDefaultIP(String strInterface)
throws UnknownHostException {
String[] ips = getIPs(strInterface);
return ips[0];
}
/**
* Returns all the host names associated by the provided nameserver with the
* address bound to the specified network interface
*
* @param strInterface
* The name of the network interface or subinterface to query
* (e.g. eth0 or eth0:0)
* @param nameserver
* The DNS host name
* @return A string vector of all host names associated with the IPs tied to
* the specified interface
* @throws UnknownHostException if the given interface is invalid
*/
public static String[] getHosts(String strInterface, String nameserver)
throws UnknownHostException {
String[] ips = getIPs(strInterface);
Vector<String> hosts = new Vector<String>();
for (int ctr = 0; ctr < ips.length; ctr++) {
try {
hosts.add(reverseDns(InetAddress.getByName(ips[ctr]),
nameserver));
} catch (UnknownHostException ignored) {
} catch (NamingException ignored) {
}
}
if (hosts.isEmpty()) {
LOG.warn("Unable to determine hostname for interface " + strInterface);
return new String[] { cachedHostname };
} else {
return hosts.toArray(new String[hosts.size()]);
}
}
/**
   * Determine the local hostname; retrieve it from the cache if it is known.
   * If we cannot determine our host name, return "localhost".
* @return the local hostname or "localhost"
*/
private static String resolveLocalHostname() {
String localhost;
try {
localhost = InetAddress.getLocalHost().getCanonicalHostName();
} catch (UnknownHostException e) {
LOG.warn("Unable to determine local hostname "
+ "-falling back to \"" + LOCALHOST + "\"", e);
localhost = LOCALHOST;
}
return localhost;
}
/**
* Get the IPAddress of the local host as a string.
   * This will be a loopback value if the local host address cannot be
* determined.
* If the loopback address of "localhost" does not resolve, then the system's
* network is in such a state that nothing is going to work. A message is
   * logged at the error level and a null pointer returned, a pointer
   * which will trigger failures later in the application
* @return the IPAddress of the local host or null for a serious problem.
*/
private static String resolveLocalHostIPAddress() {
String address;
try {
address = InetAddress.getLocalHost().getHostAddress();
} catch (UnknownHostException e) {
LOG.warn("Unable to determine address of the host"
+ "-falling back to \"" + LOCALHOST + "\" address", e);
try {
address = InetAddress.getByName(LOCALHOST).getHostAddress();
} catch (UnknownHostException noLocalHostAddressException) {
//at this point, deep trouble
LOG.error("Unable to determine local loopback address "
+ "of \"" + LOCALHOST + "\" " +
"-this system's network configuration is unsupported", e);
address = null;
}
}
return address;
}
/**
* Returns all the host names associated by the default nameserver with the
* address bound to the specified network interface
*
* @param strInterface
* The name of the network interface to query (e.g. eth0)
* @return The list of host names associated with IPs bound to the network
* interface
* @throws UnknownHostException
* If one is encountered while querying the default interface
*
*/
public static String[] getHosts(String strInterface)
throws UnknownHostException {
return getHosts(strInterface, null);
}
/**
* Returns the default (first) host name associated by the provided
* nameserver with the address bound to the specified network interface
*
* @param strInterface
* The name of the network interface to query (e.g. eth0)
* @param nameserver
* The DNS host name
   * @return The default (first) host name associated with IPs bound to the network
* interface
* @throws UnknownHostException
* If one is encountered while querying the default interface
*/
public static String getDefaultHost(String strInterface, String nameserver)
throws UnknownHostException {
if ("default".equals(strInterface)) {
return cachedHostname;
}
if ("default".equals(nameserver)) {
return getDefaultHost(strInterface);
}
String[] hosts = getHosts(strInterface, nameserver);
return hosts[0];
}
/**
* Returns the default (first) host name associated by the default
* nameserver with the address bound to the specified network interface
*
* @param strInterface
* The name of the network interface to query (e.g. eth0).
* Must not be null.
* @return The default host name associated with IPs bound to the network
* interface
* @throws UnknownHostException
* If one is encountered while querying the default interface
*/
public static String getDefaultHost(String strInterface)
throws UnknownHostException {
return getDefaultHost(strInterface, null);
}
}
| 13,020 | 34.576503 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* An interface that must be implemented to allow pluggable
* DNS-name/IP-address to RackID resolvers.
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface DNSToSwitchMapping {
/**
 * Resolves a list of DNS-names/IP-addresses and returns a list of
* switch information (network paths). One-to-one correspondence must be
* maintained between the elements in the lists.
* Consider an element in the argument list - x.y.com. The switch information
* that is returned must be a network path of the form /foo/rack,
* where / is the root, and 'foo' is the switch where 'rack' is connected.
* Note the hostname/ip-address is not part of the returned path.
* The network topology of the cluster would determine the number of
* components in the network path.
* <p/>
*
* If a name cannot be resolved to a rack, the implementation
* should return {@link NetworkTopology#DEFAULT_RACK}. This
* is what the bundled implementations do, though it is not a formal requirement
*
* @param names the list of hosts to resolve (can be empty)
* @return list of resolved network paths.
* If <i>names</i> is empty, the returned list is also empty
*/
public List<String> resolve(List<String> names);
/**
* Reload all of the cached mappings.
*
* If there is a cache, this method will clear it, so that future accesses
* will get a chance to see the new data.
*/
public void reloadCachedMappings();
/**
* Reload cached mappings on specific nodes.
*
* If there is a cache on these nodes, this method will clear it, so that
* future accesses will see updated data.
*/
public void reloadCachedMappings(List<String> names);
}
| 2,720 | 37.323944 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMappingWithDependency.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
/**
* This class extends ScriptBasedMapping class and implements
* the {@link DNSToSwitchMappingWithDependency} interface using
* a script configured via the
* {@link CommonConfigurationKeys#NET_DEPENDENCY_SCRIPT_FILE_NAME_KEY} option.
* <p/>
* It contains a static class <code>RawScriptBasedMappingWithDependency</code>
* that performs the getDependency work.
* <p/>
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ScriptBasedMappingWithDependency extends ScriptBasedMapping
implements DNSToSwitchMappingWithDependency {
/**
* key to the dependency script filename {@value}
*/
static final String DEPENDENCY_SCRIPT_FILENAME_KEY =
CommonConfigurationKeys.NET_DEPENDENCY_SCRIPT_FILE_NAME_KEY;
private Map<String, List<String>> dependencyCache =
new ConcurrentHashMap<String, List<String>>();
/**
* Create an instance with the default configuration.
   * <p>
   * Calling {@link #setConf(Configuration)} will trigger a
   * re-evaluation of the configuration settings and so can be used to
* set up the mapping script.
*/
public ScriptBasedMappingWithDependency() {
super(new RawScriptBasedMappingWithDependency());
}
/**
* Get the cached mapping and convert it to its real type
* @return the inner raw script mapping.
*/
private RawScriptBasedMappingWithDependency getRawMapping() {
return (RawScriptBasedMappingWithDependency)rawMapping;
}
@Override
public String toString() {
return "script-based mapping with " + getRawMapping().toString();
}
/**
* {@inheritDoc}
* <p/>
* This will get called in the superclass constructor, so a check is needed
   * to ensure that the raw mapping is defined before trying to relay a null
* configuration.
* @param conf
*/
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
getRawMapping().setConf(conf);
}
/**
* Get dependencies in the topology for a given host
* @param name - host name for which we are getting dependency
* @return a list of hosts dependent on the provided host name
*/
@Override
public List<String> getDependency(String name) {
//normalize all input names to be in the form of IP addresses
name = NetUtils.normalizeHostName(name);
if (name==null) {
return Collections.emptyList();
}
List<String> dependencies = dependencyCache.get(name);
if (dependencies == null) {
//not cached
dependencies = getRawMapping().getDependency(name);
if(dependencies != null) {
dependencyCache.put(name, dependencies);
}
}
return dependencies;
}
/**
* This is the uncached script mapping that is fed into the cache managed
* by the superclass {@link CachedDNSToSwitchMapping}
*/
private static final class RawScriptBasedMappingWithDependency
extends ScriptBasedMapping.RawScriptBasedMapping
implements DNSToSwitchMappingWithDependency {
private String dependencyScriptName;
/**
* Set the configuration and extract the configuration parameters of interest
* @param conf the new configuration
*/
@Override
public void setConf (Configuration conf) {
super.setConf(conf);
if (conf != null) {
dependencyScriptName = conf.get(DEPENDENCY_SCRIPT_FILENAME_KEY);
} else {
dependencyScriptName = null;
}
}
/**
* Constructor. The mapping is not ready to use until
* {@link #setConf(Configuration)} has been called
*/
public RawScriptBasedMappingWithDependency() {}
@Override
public List<String> getDependency(String name) {
if (name==null || dependencyScriptName==null) {
return Collections.emptyList();
}
List <String> m = new LinkedList<String>();
List <String> args = new ArrayList<String>(1);
args.add(name);
String output = runResolveCommand(args,dependencyScriptName);
if (output != null) {
StringTokenizer allSwitchInfo = new StringTokenizer(output);
while (allSwitchInfo.hasMoreTokens()) {
String switchInfo = allSwitchInfo.nextToken();
m.add(switchInfo);
}
} else {
// an error occurred. return null to signify this.
// (exn was already logged in runResolveCommand)
return null;
}
return m;
}
@Override
public String toString() {
return "dependency script " + dependencyScriptName;
}
}
}
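/*
 * Usage sketch (both script paths are assumptions): in addition to the
 * rack script, configure a dependency script that prints the hosts
 * co-dependent with its single argument; results are cached per host.
 */
class ScriptBasedMappingWithDependencyUsageSketch {
  static List<String> dependentsOf(String host) {
    Configuration conf = new Configuration();
    conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY,
        "/etc/hadoop/topology.sh");
    conf.set(ScriptBasedMappingWithDependency.DEPENDENCY_SCRIPT_FILENAME_KEY,
        "/etc/hadoop/dependency.sh");
    ScriptBasedMappingWithDependency mapping =
        new ScriptBasedMappingWithDependency();
    mapping.setConf(conf);
    return mapping.getDependency(host);
  }
}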
| 5,618 | 30.567416 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/StandardSocketFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.UnknownHostException;
import java.nio.channels.SocketChannel;
import javax.net.SocketFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * The default SocketFactory: creates plain (non-proxied) sockets.
 * For sockets that go through a SOCKS proxy, see SocksSocketFactory.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class StandardSocketFactory extends SocketFactory {
/**
* Default empty constructor (for use with the reflection API).
*/
public StandardSocketFactory() {
}
@Override
public Socket createSocket() throws IOException {
/*
* NOTE: This returns an NIO socket so that it has an associated
* SocketChannel. As of now, this unfortunately makes streams returned
* by Socket.getInputStream() and Socket.getOutputStream() unusable
* (because a blocking read on input stream blocks write on output stream
* and vice versa).
*
* So users of these socket factories should use
* NetUtils.getInputStream(socket) and
* NetUtils.getOutputStream(socket) instead.
*
     * A solution for hiding this from the user is to write a
* 'FilterSocket' on the lines of FilterInputStream and extend it by
* overriding getInputStream() and getOutputStream().
*/
return SocketChannel.open().socket();
}
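  /*
   * A sketch of the usage pattern recommended in the note above, wrapping the
   * streams via NetUtils (the address and port are illustrative):
   *
   *   Socket s = new StandardSocketFactory().createSocket();
   *   s.connect(new InetSocketAddress("localhost", 8020));
   *   InputStream in = NetUtils.getInputStream(s);
   *   OutputStream out = NetUtils.getOutputStream(s);
   */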
@Override
public Socket createSocket(InetAddress addr, int port) throws IOException {
Socket socket = createSocket();
socket.connect(new InetSocketAddress(addr, port));
return socket;
}
@Override
public Socket createSocket(InetAddress addr, int port,
InetAddress localHostAddr, int localPort) throws IOException {
Socket socket = createSocket();
socket.bind(new InetSocketAddress(localHostAddr, localPort));
socket.connect(new InetSocketAddress(addr, port));
return socket;
}
@Override
public Socket createSocket(String host, int port) throws IOException,
UnknownHostException {
Socket socket = createSocket();
socket.connect(new InetSocketAddress(host, port));
return socket;
}
@Override
public Socket createSocket(String host, int port,
InetAddress localHostAddr, int localPort) throws IOException,
UnknownHostException {
Socket socket = createSocket();
socket.bind(new InetSocketAddress(localHostAddr, localPort));
socket.connect(new InetSocketAddress(host, port));
return socket;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
return obj.getClass().equals(this.getClass());
}
@Override
public int hashCode() {
return this.getClass().hashCode();
}
}
| 3,666 | 30.076271 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ConnectTimeoutException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net;
import java.net.SocketTimeoutException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown by {@link NetUtils#connect(java.net.Socket, java.net.SocketAddress, int)}
* if it times out while connecting to the remote host.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ConnectTimeoutException extends SocketTimeoutException {
private static final long serialVersionUID = 1L;
public ConnectTimeoutException(String msg) {
super(msg);
}
}
| 1,392 | 35.657895 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net.unix;
import java.io.Closeable;
import org.apache.hadoop.classification.InterfaceAudience;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ReadableByteChannel;
import java.nio.ByteBuffer;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.CloseableReferenceCount;
import com.google.common.annotations.VisibleForTesting;
/**
* The implementation of UNIX domain sockets in Java.
*
* See {@link DomainSocket} for more information about UNIX domain sockets.
*/
@InterfaceAudience.LimitedPrivate("HDFS")
public class DomainSocket implements Closeable {
static {
if (SystemUtils.IS_OS_WINDOWS) {
loadingFailureReason = "UNIX Domain sockets are not available on Windows.";
} else if (!NativeCodeLoader.isNativeCodeLoaded()) {
loadingFailureReason = "libhadoop cannot be loaded.";
} else {
String problem;
try {
anchorNative();
problem = null;
} catch (Throwable t) {
problem = "DomainSocket#anchorNative got error: " + t.getMessage();
}
loadingFailureReason = problem;
}
}
static Log LOG = LogFactory.getLog(DomainSocket.class);
/**
   * True only if we should validate the paths used in
   * {@link DomainSocket#bindAndListen(String)}.
*/
private static boolean validateBindPaths = true;
/**
* The reason why DomainSocket is not available, or null if it is available.
*/
private final static String loadingFailureReason;
/**
* Initialize the native library code.
*/
private static native void anchorNative();
/**
* This function is designed to validate that the path chosen for a UNIX
* domain socket is secure. A socket path is secure if it doesn't allow
* unprivileged users to perform a man-in-the-middle attack against it.
* For example, one way to perform a man-in-the-middle attack would be for
* a malicious user to move the server socket out of the way and create his
* own socket in the same place. Not good.
*
* Note that we only check the path once. It's possible that the
* permissions on the path could change, perhaps to something more relaxed,
* immediately after the path passes our validation test-- hence creating a
* security hole. However, the purpose of this check is to spot common
* misconfigurations. System administrators do not commonly change
* permissions on these paths while the server is running.
*
* @param path the path to validate
* @param skipComponents the number of starting path components to skip
* validation for (used only for testing)
*/
@VisibleForTesting
native static void validateSocketPathSecurity0(String path,
int skipComponents) throws IOException;
  /**
   * Return the reason why UNIX domain sockets are unavailable, or null if
   * they are available.
   */
public static String getLoadingFailureReason() {
return loadingFailureReason;
}
/**
* Disable validation of the server bind paths.
*/
@VisibleForTesting
public static void disableBindPathValidation() {
validateBindPaths = false;
}
/**
* Given a path and a port, compute the effective path by replacing
* occurrences of _PORT with the port. This is mainly to make it
* possible to run multiple DataNodes locally for testing purposes.
*
* @param path The source path
* @param port Port number to use
*
* @return The effective path
*/
public static String getEffectivePath(String path, int port) {
return path.replace("_PORT", String.valueOf(port));
}
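  /*
   * For example (the path is illustrative):
   *
   *   getEffectivePath("/var/run/hadoop/dn._PORT", 50010)
   *   // returns "/var/run/hadoop/dn.50010"
   */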
/**
* The socket reference count and closed bit.
*/
final CloseableReferenceCount refCount;
/**
* The file descriptor associated with this UNIX domain socket.
*/
final int fd;
/**
* The path associated with this UNIX domain socket.
*/
private final String path;
/**
* The InputStream associated with this socket.
*/
private final DomainInputStream inputStream = new DomainInputStream();
/**
* The OutputStream associated with this socket.
*/
private final DomainOutputStream outputStream = new DomainOutputStream();
/**
* The Channel associated with this socket.
*/
private final DomainChannel channel = new DomainChannel();
private DomainSocket(String path, int fd) {
this.refCount = new CloseableReferenceCount();
this.fd = fd;
this.path = path;
}
private static native int bind0(String path) throws IOException;
private void unreference(boolean checkClosed) throws ClosedChannelException {
if (checkClosed) {
refCount.unreferenceCheckClosed();
} else {
refCount.unreference();
}
}
/**
* Create a new DomainSocket listening on the given path.
*
* @param path The path to bind and listen on.
* @return The new DomainSocket.
*/
public static DomainSocket bindAndListen(String path) throws IOException {
if (loadingFailureReason != null) {
throw new UnsupportedOperationException(loadingFailureReason);
}
if (validateBindPaths) {
validateSocketPathSecurity0(path, 0);
}
int fd = bind0(path);
return new DomainSocket(path, fd);
}
/**
* Create a pair of UNIX domain sockets which are connected to each other
* by calling socketpair(2).
*
* @return An array of two UNIX domain sockets connected to
* each other.
* @throws IOException on error.
*/
public static DomainSocket[] socketpair() throws IOException {
int fds[] = socketpair0();
return new DomainSocket[] {
new DomainSocket("(anonymous0)", fds[0]),
new DomainSocket("(anonymous1)", fds[1])
};
}
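  /*
   * Sketch: the returned pair is already connected, so bytes written to one
   * end can be read from the other (error handling elided):
   *
   *   DomainSocket[] pair = DomainSocket.socketpair();
   *   pair[0].getOutputStream().write(1);
   *   int b = pair[1].getInputStream().read(); // 1
   */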
private static native int[] socketpair0() throws IOException;
private static native int accept0(int fd) throws IOException;
/**
* Accept a new UNIX domain connection.
*
* This method can only be used on sockets that were bound with bind().
*
* @return The new connection.
* @throws IOException If there was an I/O error
* performing the accept-- such as the
* socket being closed from under us.
* @throws SocketTimeoutException If the accept timed out.
*/
public DomainSocket accept() throws IOException {
refCount.reference();
boolean exc = true;
try {
DomainSocket ret = new DomainSocket(path, accept0(fd));
exc = false;
return ret;
} finally {
unreference(exc);
}
}
private static native int connect0(String path);
/**
* Create a new DomainSocket connected to the given path.
*
* @param path The path to connect to.
* @return The new DomainSocket.
*/
public static DomainSocket connect(String path) throws IOException {
if (loadingFailureReason != null) {
throw new UnsupportedOperationException(loadingFailureReason);
}
int fd = connect0(path);
return new DomainSocket(path, fd);
}
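  /*
   * A minimal sketch tying bindAndListen(), accept() and connect() together
   * (the socket path is illustrative; error handling elided):
   *
   *   DomainSocket server = DomainSocket.bindAndListen("/var/run/demo.sock");
   *   DomainSocket client = DomainSocket.connect("/var/run/demo.sock");
   *   DomainSocket conn = server.accept();
   *   client.getOutputStream().write(42);
   *   int b = conn.getInputStream().read(); // 42
   */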
/**
* Return true if the file descriptor is currently open.
*
* @return True if the file descriptor is currently open.
*/
public boolean isOpen() {
return refCount.isOpen();
}
/**
* @return The socket path.
*/
public String getPath() {
return path;
}
/**
* @return The socket InputStream
*/
public DomainInputStream getInputStream() {
return inputStream;
}
/**
* @return The socket OutputStream
*/
public DomainOutputStream getOutputStream() {
return outputStream;
}
/**
* @return The socket Channel
*/
public DomainChannel getChannel() {
return channel;
}
public static final int SEND_BUFFER_SIZE = 1;
public static final int RECEIVE_BUFFER_SIZE = 2;
public static final int SEND_TIMEOUT = 3;
public static final int RECEIVE_TIMEOUT = 4;
private static native void setAttribute0(int fd, int type, int val)
throws IOException;
public void setAttribute(int type, int size) throws IOException {
refCount.reference();
boolean exc = true;
try {
setAttribute0(fd, type, size);
exc = false;
} finally {
unreference(exc);
}
}
private native int getAttribute0(int fd, int type) throws IOException;
public int getAttribute(int type) throws IOException {
refCount.reference();
int attribute;
boolean exc = true;
try {
attribute = getAttribute0(fd, type);
exc = false;
return attribute;
} finally {
unreference(exc);
}
}
private static native void close0(int fd) throws IOException;
private static native void closeFileDescriptor0(FileDescriptor fd)
throws IOException;
private static native void shutdown0(int fd) throws IOException;
/**
* Close the Socket.
*/
@Override
public void close() throws IOException {
// Set the closed bit on this DomainSocket
int count;
try {
count = refCount.setClosed();
} catch (ClosedChannelException e) {
// Someone else already closed the DomainSocket.
return;
}
// Wait for all references to go away
boolean didShutdown = false;
boolean interrupted = false;
while (count > 0) {
if (!didShutdown) {
try {
// Calling shutdown on the socket will interrupt blocking system
// calls like accept, write, and read that are going on in a
// different thread.
shutdown0(fd);
} catch (IOException e) {
LOG.error("shutdown error: ", e);
}
didShutdown = true;
}
try {
Thread.sleep(10);
} catch (InterruptedException e) {
interrupted = true;
}
count = refCount.getReferenceCount();
}
// At this point, nobody has a reference to the file descriptor,
// and nobody will be able to get one in the future either.
// We now call close(2) on the file descriptor.
// After this point, the file descriptor number will be reused by
// something else. Although this DomainSocket object continues to hold
// the old file descriptor number (it's a final field), we never use it
// again because this DomainSocket is closed.
close0(fd);
if (interrupted) {
Thread.currentThread().interrupt();
}
}
/**
* Call shutdown(SHUT_RDWR) on the UNIX domain socket.
*
* @throws IOException
*/
public void shutdown() throws IOException {
refCount.reference();
boolean exc = true;
try {
shutdown0(fd);
exc = false;
} finally {
unreference(exc);
}
}
private native static void sendFileDescriptors0(int fd,
FileDescriptor descriptors[],
byte jbuf[], int offset, int length) throws IOException;
/**
* Send some FileDescriptor objects to the process on the other side of this
* socket.
*
* @param descriptors The file descriptors to send.
* @param jbuf Some bytes to send. You must send at least
* one byte.
* @param offset The offset in the jbuf array to start at.
* @param length Length of the jbuf array to use.
*/
public void sendFileDescriptors(FileDescriptor descriptors[],
byte jbuf[], int offset, int length) throws IOException {
refCount.reference();
boolean exc = true;
try {
sendFileDescriptors0(fd, descriptors, jbuf, offset, length);
exc = false;
} finally {
unreference(exc);
}
}
private static native int receiveFileDescriptors0(int fd,
FileDescriptor[] descriptors,
byte jbuf[], int offset, int length) throws IOException;
/**
* Receive some FileDescriptor objects from the process on the other side of
* this socket.
*
* @param descriptors (output parameter) Array of FileDescriptors.
* We will fill as many slots as possible with file
* descriptors passed from the remote process. The
* other slots will contain NULL.
* @param jbuf (output parameter) Buffer to read into.
* The UNIX domain sockets API requires you to read
* at least one byte from the remote process, even
* if all you care about is the file descriptors
* you will receive.
* @param offset Offset into the byte buffer to load data
* @param length Length of the byte buffer to use for data
*
* @return The number of bytes read. This will be -1 if we
* reached EOF (similar to SocketInputStream);
* otherwise, it will be positive.
* @throws IOException if there was an I/O error.
*/
public int receiveFileDescriptors(FileDescriptor[] descriptors,
byte jbuf[], int offset, int length) throws IOException {
refCount.reference();
boolean exc = true;
try {
int nBytes = receiveFileDescriptors0(fd, descriptors, jbuf, offset, length);
exc = false;
return nBytes;
} finally {
unreference(exc);
}
}
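  /*
   * Sketch of descriptor passing over a connected pair: at least one byte
   * must accompany the descriptors, as noted above (the file path is
   * illustrative; error handling elided):
   *
   *   DomainSocket[] pair = DomainSocket.socketpair();
   *   FileInputStream fis = new FileInputStream("/etc/hosts");
   *   pair[0].sendFileDescriptors(
   *       new FileDescriptor[] { fis.getFD() }, new byte[] { 0 }, 0, 1);
   *   FileDescriptor[] recvd = new FileDescriptor[1];
   *   byte[] buf = new byte[1];
   *   pair[1].receiveFileDescriptors(recvd, buf, 0, 1);
   */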
/**
* Receive some FileDescriptor objects from the process on the other side of
* this socket, and wrap them in FileInputStream objects.
*
* See {@link DomainSocket#recvFileInputStreams(ByteBuffer)}
*/
public int recvFileInputStreams(FileInputStream[] streams, byte buf[],
int offset, int length) throws IOException {
FileDescriptor descriptors[] = new FileDescriptor[streams.length];
boolean success = false;
for (int i = 0; i < streams.length; i++) {
streams[i] = null;
}
refCount.reference();
try {
int ret = receiveFileDescriptors0(fd, descriptors, buf, offset, length);
for (int i = 0, j = 0; i < descriptors.length; i++) {
if (descriptors[i] != null) {
streams[j++] = new FileInputStream(descriptors[i]);
descriptors[i] = null;
}
}
success = true;
return ret;
} finally {
if (!success) {
for (int i = 0; i < descriptors.length; i++) {
if (descriptors[i] != null) {
try {
closeFileDescriptor0(descriptors[i]);
} catch (Throwable t) {
LOG.warn(t);
}
} else if (streams[i] != null) {
try {
streams[i].close();
} catch (Throwable t) {
LOG.warn(t);
} finally {
              streams[i] = null;
            }
}
}
}
unreference(!success);
}
}
private native static int readArray0(int fd, byte b[], int off, int len)
throws IOException;
private native static int available0(int fd) throws IOException;
private static native void write0(int fd, int b) throws IOException;
private static native void writeArray0(int fd, byte b[], int offset, int length)
throws IOException;
private native static int readByteBufferDirect0(int fd, ByteBuffer dst,
int position, int remaining) throws IOException;
/**
* Input stream for UNIX domain sockets.
*/
@InterfaceAudience.LimitedPrivate("HDFS")
public class DomainInputStream extends InputStream {
@Override
public int read() throws IOException {
refCount.reference();
boolean exc = true;
try {
byte b[] = new byte[1];
int ret = DomainSocket.readArray0(DomainSocket.this.fd, b, 0, 1);
exc = false;
return (ret >= 0) ? b[0] : -1;
} finally {
unreference(exc);
}
}
@Override
public int read(byte b[], int off, int len) throws IOException {
refCount.reference();
boolean exc = true;
try {
int nRead = DomainSocket.readArray0(DomainSocket.this.fd, b, off, len);
exc = false;
return nRead;
} finally {
unreference(exc);
}
}
@Override
public int available() throws IOException {
refCount.reference();
boolean exc = true;
try {
int nAvailable = DomainSocket.available0(DomainSocket.this.fd);
exc = false;
return nAvailable;
} finally {
unreference(exc);
}
}
@Override
public void close() throws IOException {
DomainSocket.this.close();
}
}
/**
* Output stream for UNIX domain sockets.
*/
@InterfaceAudience.LimitedPrivate("HDFS")
public class DomainOutputStream extends OutputStream {
@Override
public void close() throws IOException {
DomainSocket.this.close();
}
@Override
public void write(int val) throws IOException {
refCount.reference();
boolean exc = true;
try {
byte b[] = new byte[1];
b[0] = (byte)val;
DomainSocket.writeArray0(DomainSocket.this.fd, b, 0, 1);
exc = false;
} finally {
unreference(exc);
}
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
refCount.reference();
boolean exc = true;
try {
DomainSocket.writeArray0(DomainSocket.this.fd, b, off, len);
exc = false;
} finally {
unreference(exc);
}
}
}
@InterfaceAudience.LimitedPrivate("HDFS")
public class DomainChannel implements ReadableByteChannel {
@Override
public boolean isOpen() {
return DomainSocket.this.isOpen();
}
@Override
public void close() throws IOException {
DomainSocket.this.close();
}
@Override
public int read(ByteBuffer dst) throws IOException {
refCount.reference();
boolean exc = true;
try {
int nread = 0;
if (dst.isDirect()) {
nread = DomainSocket.readByteBufferDirect0(DomainSocket.this.fd,
dst, dst.position(), dst.remaining());
} else if (dst.hasArray()) {
nread = DomainSocket.readArray0(DomainSocket.this.fd,
dst.array(), dst.position() + dst.arrayOffset(),
dst.remaining());
} else {
throw new AssertionError("we don't support " +
"using ByteBuffers that aren't either direct or backed by " +
"arrays");
}
if (nread > 0) {
dst.position(dst.position() + nread);
}
exc = false;
return nread;
} finally {
unreference(exc);
}
}
}
@Override
public String toString() {
return String.format("DomainSocket(fd=%d,path=%s)", fd, path);
}
}
| 19,861 | 29.323664 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.net.unix;
import java.io.Closeable;
import java.io.EOFException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.IOUtils;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.TreeMap;
import java.util.Map;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.lang.SystemUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.NativeCodeLoader;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.Uninterruptibles;
/**
* The DomainSocketWatcher watches a set of domain sockets to see when they
* become readable, or closed. When one of those events happens, it makes a
* callback.
*
* See {@link DomainSocket} for more information about UNIX domain sockets.
*/
@InterfaceAudience.LimitedPrivate("HDFS")
public final class DomainSocketWatcher implements Closeable {
static {
if (SystemUtils.IS_OS_WINDOWS) {
loadingFailureReason = "UNIX Domain sockets are not available on Windows.";
} else if (!NativeCodeLoader.isNativeCodeLoaded()) {
loadingFailureReason = "libhadoop cannot be loaded.";
} else {
String problem;
try {
anchorNative();
problem = null;
} catch (Throwable t) {
problem = "DomainSocketWatcher#anchorNative got error: " +
t.getMessage();
}
loadingFailureReason = problem;
}
}
static Log LOG = LogFactory.getLog(DomainSocketWatcher.class);
/**
* The reason why DomainSocketWatcher is not available, or null if it is
* available.
*/
private final static String loadingFailureReason;
/**
* Initializes the native library code.
*/
private static native void anchorNative();
public static String getLoadingFailureReason() {
return loadingFailureReason;
}
public interface Handler {
/**
* Handles an event on a socket. An event may be the socket becoming
* readable, or the remote end being closed.
*
* @param sock The socket that the event occurred on.
* @return Whether we should close the socket.
*/
boolean handle(DomainSocket sock);
}
/**
   * Handler for {@link DomainSocketWatcher#notificationSockets}[1].
*/
private class NotificationHandler implements Handler {
public boolean handle(DomainSocket sock) {
assert(lock.isHeldByCurrentThread());
try {
kicked = false;
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": NotificationHandler: doing a read on " +
sock.fd);
}
if (sock.getInputStream().read() == -1) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": NotificationHandler: got EOF on " + sock.fd);
}
throw new EOFException();
}
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": NotificationHandler: read succeeded on " +
sock.fd);
}
return false;
} catch (IOException e) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": NotificationHandler: setting closed to " +
"true for " + sock.fd);
}
closed = true;
return true;
}
}
}
private static class Entry {
final DomainSocket socket;
final Handler handler;
Entry(DomainSocket socket, Handler handler) {
this.socket = socket;
this.handler = handler;
}
DomainSocket getDomainSocket() {
return socket;
}
Handler getHandler() {
return handler;
}
}
/**
* The FdSet is a set of file descriptors that gets passed to poll(2).
* It contains a native memory segment, so that we don't have to copy
* in the poll0 function.
*/
private static class FdSet {
private long data;
private native static long alloc0();
FdSet() {
data = alloc0();
}
/**
* Add a file descriptor to the set.
*
* @param fd The file descriptor to add.
*/
native void add(int fd);
/**
* Remove a file descriptor from the set.
*
* @param fd The file descriptor to remove.
*/
native void remove(int fd);
/**
* Get an array containing all the FDs marked as readable.
* Also clear the state of all FDs.
*
* @return An array containing all of the currently readable file
* descriptors.
*/
native int[] getAndClearReadableFds();
/**
* Close the object and de-allocate the memory used.
*/
native void close();
}
/**
* Lock which protects toAdd, toRemove, and closed.
*/
private final ReentrantLock lock = new ReentrantLock();
/**
* Condition variable which indicates that toAdd and toRemove have been
* processed.
*/
private final Condition processedCond = lock.newCondition();
/**
* Entries to add.
*/
private final LinkedList<Entry> toAdd =
new LinkedList<Entry>();
/**
* Entries to remove.
*/
private final TreeMap<Integer, DomainSocket> toRemove =
new TreeMap<Integer, DomainSocket>();
/**
* Maximum length of time to go between checking whether the interrupted
* bit has been set for this thread.
*/
private final int interruptCheckPeriodMs;
/**
* A pair of sockets used to wake up the thread after it has called poll(2).
*/
private final DomainSocket notificationSockets[];
/**
* Whether or not this DomainSocketWatcher is closed.
*/
private boolean closed = false;
/**
* True if we have written a byte to the notification socket. We should not
* write anything else to the socket until the notification handler has had a
* chance to run. Otherwise, our thread might block, causing deadlock.
* See HADOOP-11333 for details.
*/
private boolean kicked = false;
public DomainSocketWatcher(int interruptCheckPeriodMs, String src)
throws IOException {
if (loadingFailureReason != null) {
throw new UnsupportedOperationException(loadingFailureReason);
}
Preconditions.checkArgument(interruptCheckPeriodMs > 0);
this.interruptCheckPeriodMs = interruptCheckPeriodMs;
notificationSockets = DomainSocket.socketpair();
watcherThread.setDaemon(true);
watcherThread.setName(src + " DomainSocketWatcher");
watcherThread
.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread thread, Throwable t) {
LOG.error(thread + " terminating on unexpected exception", t);
}
});
watcherThread.start();
}
/**
* Close the DomainSocketWatcher and wait for its thread to terminate.
*
* If there is more than one close, all but the first will be ignored.
*/
@Override
public void close() throws IOException {
lock.lock();
try {
if (closed) return;
if (LOG.isDebugEnabled()) {
LOG.debug(this + ": closing");
}
closed = true;
} finally {
lock.unlock();
}
// Close notificationSockets[0], so that notificationSockets[1] gets an EOF
// event. This will wake up the thread immediately if it is blocked inside
// the select() system call.
notificationSockets[0].close();
// Wait for the select thread to terminate.
Uninterruptibles.joinUninterruptibly(watcherThread);
}
@VisibleForTesting
public boolean isClosed() {
lock.lock();
try {
return closed;
} finally {
lock.unlock();
}
}
/**
* Add a socket.
*
* @param sock The socket to add. It is an error to re-add a socket that
* we are already watching.
* @param handler The handler to associate with this socket. This may be
* called any time after this function is called.
*/
public void add(DomainSocket sock, Handler handler) {
lock.lock();
try {
if (closed) {
handler.handle(sock);
IOUtils.cleanup(LOG, sock);
return;
}
Entry entry = new Entry(sock, handler);
try {
sock.refCount.reference();
} catch (ClosedChannelException e1) {
// If the socket is already closed before we add it, invoke the
// handler immediately. Then we're done.
handler.handle(sock);
return;
}
toAdd.add(entry);
kick();
while (true) {
try {
processedCond.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
if (!toAdd.contains(entry)) {
break;
}
}
} finally {
lock.unlock();
}
}
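  /*
   * Sketch of typical registration (socket creation elided; the handler shape
   * matches the Handler interface above):
   *
   *   DomainSocketWatcher watcher = new DomainSocketWatcher(60000, "demo");
   *   watcher.add(sock, new DomainSocketWatcher.Handler() {
   *     @Override
   *     public boolean handle(DomainSocket s) {
   *       // consume the data that made s readable...
   *       return true; // ask the watcher to close the socket
   *     }
   *   });
   */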
/**
* Remove a socket. Its handler will be called.
*
* @param sock The socket to remove.
*/
public void remove(DomainSocket sock) {
lock.lock();
try {
if (closed) return;
toRemove.put(sock.fd, sock);
kick();
while (true) {
try {
processedCond.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
if (!toRemove.containsKey(sock.fd)) {
break;
}
}
} finally {
lock.unlock();
}
}
/**
* Wake up the DomainSocketWatcher thread.
*/
private void kick() {
assert(lock.isHeldByCurrentThread());
if (kicked) {
return;
}
try {
notificationSockets[0].getOutputStream().write(0);
kicked = true;
} catch (IOException e) {
if (!closed) {
LOG.error(this + ": error writing to notificationSockets[0]", e);
}
}
}
/**
* Send callback and return whether or not the domain socket was closed as a
* result of processing.
*
* @param caller reason for call
* @param entries mapping of file descriptor to entry
* @param fdSet set of file descriptors
* @param fd file descriptor
* @return true if the domain socket was closed as a result of processing
*/
private boolean sendCallback(String caller, TreeMap<Integer, Entry> entries,
FdSet fdSet, int fd) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": " + caller + " starting sendCallback for fd " + fd);
}
Entry entry = entries.get(fd);
Preconditions.checkNotNull(entry,
this + ": fdSet contained " + fd + ", which we were " +
"not tracking.");
DomainSocket sock = entry.getDomainSocket();
if (entry.getHandler().handle(sock)) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": " + caller + ": closing fd " + fd +
" at the request of the handler.");
}
if (toRemove.remove(fd) != null) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": " + caller + " : sendCallback processed fd " +
fd + " in toRemove.");
}
}
try {
sock.refCount.unreferenceCheckClosed();
} catch (IOException e) {
Preconditions.checkArgument(false,
this + ": file descriptor " + sock.fd + " was closed while " +
"still in the poll(2) loop.");
}
IOUtils.cleanup(LOG, sock);
fdSet.remove(fd);
return true;
} else {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": " + caller + ": sendCallback not " +
"closing fd " + fd);
}
return false;
}
}
/**
* Send callback, and if the domain socket was closed as a result of
* processing, then also remove the entry for the file descriptor.
*
* @param caller reason for call
* @param entries mapping of file descriptor to entry
* @param fdSet set of file descriptors
* @param fd file descriptor
*/
private void sendCallbackAndRemove(String caller,
TreeMap<Integer, Entry> entries, FdSet fdSet, int fd) {
if (sendCallback(caller, entries, fdSet, fd)) {
entries.remove(fd);
}
}
@VisibleForTesting
final Thread watcherThread = new Thread(new Runnable() {
@Override
public void run() {
if (LOG.isDebugEnabled()) {
LOG.debug(this + ": starting with interruptCheckPeriodMs = " +
interruptCheckPeriodMs);
}
final TreeMap<Integer, Entry> entries = new TreeMap<Integer, Entry>();
FdSet fdSet = new FdSet();
addNotificationSocket(entries, fdSet);
try {
while (true) {
lock.lock();
try {
for (int fd : fdSet.getAndClearReadableFds()) {
sendCallbackAndRemove("getAndClearReadableFds", entries, fdSet,
fd);
}
if (!(toAdd.isEmpty() && toRemove.isEmpty())) {
// Handle pending additions (before pending removes).
for (Iterator<Entry> iter = toAdd.iterator(); iter.hasNext(); ) {
Entry entry = iter.next();
iter.remove();
DomainSocket sock = entry.getDomainSocket();
Entry prevEntry = entries.put(sock.fd, entry);
Preconditions.checkState(prevEntry == null,
this + ": tried to watch a file descriptor that we " +
"were already watching: " + sock);
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": adding fd " + sock.fd);
}
fdSet.add(sock.fd);
}
// Handle pending removals
while (true) {
Map.Entry<Integer, DomainSocket> entry = toRemove.firstEntry();
if (entry == null) break;
sendCallbackAndRemove("handlePendingRemovals",
entries, fdSet, entry.getValue().fd);
}
processedCond.signalAll();
}
// Check if the thread should terminate. Doing this check now is
// easier than at the beginning of the loop, since we know toAdd and
// toRemove are now empty and processedCond has been notified if it
// needed to be.
if (closed) {
if (LOG.isDebugEnabled()) {
LOG.debug(toString() + " thread terminating.");
}
return;
}
// Check if someone sent our thread an InterruptedException while we
// were waiting in poll().
if (Thread.interrupted()) {
throw new InterruptedException();
}
} finally {
lock.unlock();
}
doPoll0(interruptCheckPeriodMs, fdSet);
}
} catch (InterruptedException e) {
LOG.info(toString() + " terminating on InterruptedException");
} catch (Throwable e) {
LOG.error(toString() + " terminating on exception", e);
} finally {
lock.lock();
try {
kick(); // allow the handler for notificationSockets[0] to read a byte
for (Entry entry : entries.values()) {
// We do not remove from entries as we iterate, because that can
// cause a ConcurrentModificationException.
sendCallback("close", entries, fdSet, entry.getDomainSocket().fd);
}
entries.clear();
fdSet.close();
closed = true;
if (!(toAdd.isEmpty() && toRemove.isEmpty())) {
// Items in toAdd might not be added to entries, handle it here
for (Iterator<Entry> iter = toAdd.iterator(); iter.hasNext();) {
Entry entry = iter.next();
entry.getDomainSocket().refCount.unreference();
entry.getHandler().handle(entry.getDomainSocket());
IOUtils.cleanup(LOG, entry.getDomainSocket());
iter.remove();
}
// Items in toRemove might not be really removed, handle it here
while (true) {
Map.Entry<Integer, DomainSocket> entry = toRemove.firstEntry();
if (entry == null)
break;
sendCallback("close", entries, fdSet, entry.getValue().fd);
}
}
processedCond.signalAll();
} finally {
lock.unlock();
}
}
}
});
private void addNotificationSocket(final TreeMap<Integer, Entry> entries,
FdSet fdSet) {
entries.put(notificationSockets[1].fd,
new Entry(notificationSockets[1], new NotificationHandler()));
try {
notificationSockets[1].refCount.reference();
} catch (IOException e) {
throw new RuntimeException(e);
}
fdSet.add(notificationSockets[1].fd);
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": adding notificationSocket " +
notificationSockets[1].fd + ", connected to " +
notificationSockets[0].fd);
}
}
public String toString() {
return "DomainSocketWatcher(" + System.identityHashCode(this) + ")";
}
private static native int doPoll0(int maxWaitMs, FdSet readFds)
throws IOException;
}
| 17,949 | 30.055363 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.log;
import java.io.*;
import java.net.*;
import java.util.regex.Pattern;
import javax.servlet.*;
import javax.servlet.http.*;
import com.google.common.base.Charsets;
import org.apache.commons.logging.*;
import org.apache.commons.logging.impl.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.util.ServletUtil;
/**
* Change log level in runtime.
*/
@InterfaceStability.Evolving
public class LogLevel {
public static final String USAGES = "\nUsage: General options are:\n"
+ "\t[-getlevel <host:httpPort> <name>]\n"
+ "\t[-setlevel <host:httpPort> <name> <level>]\n";
/**
* A command line implementation
*/
public static void main(String[] args) {
if (args.length == 3 && "-getlevel".equals(args[0])) {
process("http://" + args[1] + "/logLevel?log=" + args[2]);
return;
}
else if (args.length == 4 && "-setlevel".equals(args[0])) {
process("http://" + args[1] + "/logLevel?log=" + args[2]
+ "&level=" + args[3]);
return;
}
System.err.println(USAGES);
System.exit(-1);
}
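  /*
   * Example invocations (host, port and logger name are illustrative; this
   * class backs the "hadoop daemonlog" command):
   *
   *   hadoop daemonlog -getlevel nn1.example.com:50070 org.apache.hadoop.ipc
   *   hadoop daemonlog -setlevel nn1.example.com:50070 org.apache.hadoop.ipc DEBUG
   */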
private static void process(String urlstring) {
try {
URL url = new URL(urlstring);
System.out.println("Connecting to " + url);
URLConnection connection = url.openConnection();
connection.connect();
BufferedReader in = new BufferedReader(new InputStreamReader(
connection.getInputStream(), Charsets.UTF_8));
      for (String line; (line = in.readLine()) != null; ) {
        if (line.startsWith(MARKER)) {
          System.out.println(TAG.matcher(line).replaceAll(""));
        }
      }
      in.close();
} catch (IOException ioe) {
System.err.println("" + ioe);
}
}
static final String MARKER = "<!-- OUTPUT -->";
static final Pattern TAG = Pattern.compile("<[^>]*>");
/**
* A servlet implementation
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public static class Servlet extends HttpServlet {
private static final long serialVersionUID = 1L;
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
) throws ServletException, IOException {
// Do the authorization
if (!HttpServer2.hasAdministratorAccess(getServletContext(), request,
response)) {
return;
}
PrintWriter out = ServletUtil.initHTML(response, "Log Level");
String logName = ServletUtil.getParameter(request, "log");
String level = ServletUtil.getParameter(request, "level");
if (logName != null) {
out.println("<br /><hr /><h3>Results</h3>");
out.println(MARKER
+ "Submitted Log Name: <b>" + logName + "</b><br />");
Log log = LogFactory.getLog(logName);
out.println(MARKER
+ "Log Class: <b>" + log.getClass().getName() +"</b><br />");
if (level != null) {
out.println(MARKER + "Submitted Level: <b>" + level + "</b><br />");
}
if (log instanceof Log4JLogger) {
process(((Log4JLogger)log).getLogger(), level, out);
}
else if (log instanceof Jdk14Logger) {
process(((Jdk14Logger)log).getLogger(), level, out);
}
else {
out.println("Sorry, " + log.getClass() + " not supported.<br />");
}
}
out.println(FORMS);
out.println(ServletUtil.HTML_TAIL);
}
static final String FORMS = "\n<br /><hr /><h3>Get / Set</h3>"
+ "\n<form>Log: <input type='text' size='50' name='log' /> "
+ "<input type='submit' value='Get Log Level' />"
+ "</form>"
+ "\n<form>Log: <input type='text' size='50' name='log' /> "
+ "Level: <input type='text' name='level' /> "
+ "<input type='submit' value='Set Log Level' />"
+ "</form>";
private static void process(org.apache.log4j.Logger log, String level,
PrintWriter out) throws IOException {
if (level != null) {
if (!level.equals(org.apache.log4j.Level.toLevel(level).toString())) {
out.println(MARKER + "Bad level : <b>" + level + "</b><br />");
} else {
log.setLevel(org.apache.log4j.Level.toLevel(level));
out.println(MARKER + "Setting Level to " + level + " ...<br />");
}
}
out.println(MARKER
+ "Effective level: <b>" + log.getEffectiveLevel() + "</b><br />");
}
private static void process(java.util.logging.Logger log, String level,
PrintWriter out) throws IOException {
if (level != null) {
log.setLevel(java.util.logging.Level.parse(level));
out.println(MARKER + "Setting Level to " + level + " ...<br />");
}
java.util.logging.Level lev;
for(; (lev = log.getLevel()) == null; log = log.getParent());
out.println(MARKER + "Effective level: <b>" + lev + "</b><br />");
}
}
}
| 5,851 | 33.627219 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/Log4Json.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.log;
import org.apache.log4j.Layout;
import org.apache.log4j.helpers.ISO8601DateFormat;
import org.apache.log4j.spi.LoggingEvent;
import org.apache.log4j.spi.ThrowableInformation;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.MappingJsonFactory;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.node.ContainerNode;
import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;
import java.text.DateFormat;
import java.util.Date;
/**
 * This offers a log layout for JSON, with some test entry points. Its purpose
 * is to allow Log4J to generate events that are easy for other programs to
 * parse, but which are somewhat human-readable.
 *
 * Some features:
*
* <ol>
* <li>Every event is a standalone JSON clause</li>
 * <li>Time is published as the number of milliseconds since 1/1/1970
 * - this is the fastest value to generate.</li>
* <li>An ISO date is generated, but this is cached and will only be accurate to within a second</li>
* <li>the stack trace is included as an array</li>
* </ol>
*
* A simple log event will resemble the following
* <pre>
* {"name":"test","time":1318429136789,"date":"2011-10-12 15:18:56,789","level":"INFO","thread":"main","message":"test message"}
* </pre>
*
* An event with an error will contain data similar to that below (which has been reformatted to be multi-line).
*
* <pre>
* {
* "name":"testException",
* "time":1318429136789,
* "date":"2011-10-12 15:18:56,789",
* "level":"INFO",
* "thread":"quoted\"",
* "message":"new line\n and {}",
* "exceptionclass":"java.net.NoRouteToHostException",
* "stack":[
* "java.net.NoRouteToHostException: that box caught fire 3 years ago",
* "\tat org.apache.hadoop.log.TestLog4Json.testException(TestLog4Json.java:49)",
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)",
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)",
* "\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)",
* "\tat java.lang.reflect.Method.invoke(Method.java:597)",
* "\tat junit.framework.TestCase.runTest(TestCase.java:168)",
* "\tat junit.framework.TestCase.runBare(TestCase.java:134)",
* "\tat junit.framework.TestResult$1.protect(TestResult.java:110)",
* "\tat junit.framework.TestResult.runProtected(TestResult.java:128)",
* "\tat junit.framework.TestResult.run(TestResult.java:113)",
* "\tat junit.framework.TestCase.run(TestCase.java:124)",
* "\tat junit.framework.TestSuite.runTest(TestSuite.java:232)",
* "\tat junit.framework.TestSuite.run(TestSuite.java:227)",
* "\tat org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)",
* "\tat org.apache.maven.surefire.junit4.JUnit4TestSet.execute(JUnit4TestSet.java:59)",
* "\tat org.apache.maven.surefire.suite.AbstractDirectoryTestSuite.executeTestSet(AbstractDirectoryTestSuite.java:120)",
* "\tat org.apache.maven.surefire.suite.AbstractDirectoryTestSuite.execute(AbstractDirectoryTestSuite.java:145)",
* "\tat org.apache.maven.surefire.Surefire.run(Surefire.java:104)",
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)",
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)",
* "\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)",
* "\tat java.lang.reflect.Method.invoke(Method.java:597)",
* "\tat org.apache.maven.surefire.booter.SurefireBooter.runSuitesInProcess(SurefireBooter.java:290)",
* "\tat org.apache.maven.surefire.booter.SurefireBooter.main(SurefireBooter.java:1017)"
* ]
* }
* </pre>
*/
public class Log4Json extends Layout {
/**
* Jackson factories are thread safe when constructing parsers and generators.
* They are not thread safe in configure methods; if there is to be any
   * configuration it must be done in a static initializer block.
*/
private static final JsonFactory factory = new MappingJsonFactory();
public static final String DATE = "date";
public static final String EXCEPTION_CLASS = "exceptionclass";
public static final String LEVEL = "level";
public static final String MESSAGE = "message";
public static final String NAME = "name";
public static final String STACK = "stack";
public static final String THREAD = "thread";
public static final String TIME = "time";
public static final String JSON_TYPE = "application/json";
private final DateFormat dateFormat;
public Log4Json() {
dateFormat = new ISO8601DateFormat();
}
/**
* @return the mime type of JSON
*/
@Override
public String getContentType() {
return JSON_TYPE;
}
@Override
public String format(LoggingEvent event) {
try {
return toJson(event);
} catch (IOException e) {
//this really should not happen, and rather than throw an exception
//which may hide the real problem, the log class is printed
//in JSON format. The classname is used to ensure valid JSON is
//returned without playing escaping games
return "{ \"logfailure\":\"" + e.getClass().toString() + "\"}";
}
}
/**
* Convert an event to JSON
*
* @param event the event -must not be null
* @return a string value
* @throws IOException on problems generating the JSON
*/
public String toJson(LoggingEvent event) throws IOException {
StringWriter writer = new StringWriter();
toJson(writer, event);
return writer.toString();
}
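  /*
   * Sketch: wiring this layout into log4j.properties so that every event is
   * emitted as a single JSON line (the appender name is illustrative):
   *
   *   log4j.appender.json=org.apache.log4j.ConsoleAppender
   *   log4j.appender.json.layout=org.apache.hadoop.log.Log4Json
   *   log4j.rootLogger=INFO,json
   */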
/**
* Convert an event to JSON
*
* @param writer the destination writer
* @param event the event -must not be null
* @return the writer
* @throws IOException on problems generating the JSON
*/
public Writer toJson(final Writer writer, final LoggingEvent event)
throws IOException {
ThrowableInformation ti = event.getThrowableInformation();
toJson(writer,
event.getLoggerName(),
event.getTimeStamp(),
event.getLevel().toString(),
event.getThreadName(),
event.getRenderedMessage(),
ti);
return writer;
}
/**
* Build a JSON entry from the parameters. This is public for testing.
*
* @param writer destination
* @param loggerName logger name
* @param timeStamp time_t value
* @param level level string
* @param threadName name of the thread
* @param message rendered message
* @param ti nullable thrown information
* @return the writer
* @throws IOException on any problem
*/
public Writer toJson(final Writer writer,
final String loggerName,
final long timeStamp,
final String level,
final String threadName,
final String message,
final ThrowableInformation ti) throws IOException {
JsonGenerator json = factory.createJsonGenerator(writer);
json.writeStartObject();
json.writeStringField(NAME, loggerName);
json.writeNumberField(TIME, timeStamp);
Date date = new Date(timeStamp);
json.writeStringField(DATE, dateFormat.format(date));
json.writeStringField(LEVEL, level);
json.writeStringField(THREAD, threadName);
json.writeStringField(MESSAGE, message);
if (ti != null) {
//there is some throwable info, but if the log event has been sent over the wire,
//there may not be a throwable inside it, just a summary.
Throwable thrown = ti.getThrowable();
String eclass = (thrown != null) ?
thrown.getClass().getName()
: "";
json.writeStringField(EXCEPTION_CLASS, eclass);
String[] stackTrace = ti.getThrowableStrRep();
json.writeArrayFieldStart(STACK);
for (String row : stackTrace) {
json.writeString(row);
}
json.writeEndArray();
}
json.writeEndObject();
json.flush();
json.close();
return writer;
}
/**
* This appender does not ignore throwables
*
* @return false, always
*/
@Override
public boolean ignoresThrowable() {
return false;
}
/**
* Do nothing
*/
@Override
public void activateOptions() {
}
/**
* For use in tests
*
* @param json incoming JSON to parse
* @return a node tree
* @throws IOException on any parsing problems
*/
public static ContainerNode parse(String json) throws IOException {
ObjectMapper mapper = new ObjectMapper(factory);
JsonNode jsonNode = mapper.readTree(json);
if (!(jsonNode instanceof ContainerNode)) {
throw new IOException("Wrong JSON data: " + json);
}
return (ContainerNode) jsonNode;
}
}
| 9,812 | 36.311787 | 132 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/EventCounter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.log;
/**
* A log4J Appender that simply counts logging events in three levels:
* fatal, error and warn. The class name is used in log4j.properties
* @deprecated use {@link org.apache.hadoop.log.metrics.EventCounter} instead
*/
@Deprecated
public class EventCounter extends org.apache.hadoop.log.metrics.EventCounter {
static {
// The logging system is not started yet.
System.err.println("WARNING: "+ EventCounter.class.getName() +
" is deprecated. Please use "+
org.apache.hadoop.log.metrics.EventCounter.class.getName() +
" in all the log4j.properties files.");
}
}
| 1,442 | 40.228571 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/metrics/EventCounter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.log.metrics;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.spi.LoggingEvent;
/**
* A log4J Appender that simply counts logging events in three levels:
* fatal, error and warn. The class name is used in log4j.properties
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class EventCounter extends AppenderSkeleton {
private static final int FATAL = 0;
private static final int ERROR = 1;
private static final int WARN = 2;
private static final int INFO = 3;
private static class EventCounts {
private final long[] counts = {0, 0, 0, 0};
private synchronized void incr(int i) {
++counts[i];
}
private synchronized long get(int i) {
return counts[i];
}
}
private static EventCounts counts = new EventCounts();
@InterfaceAudience.Private
public static long getFatal() {
return counts.get(FATAL);
}
@InterfaceAudience.Private
public static long getError() {
return counts.get(ERROR);
}
@InterfaceAudience.Private
public static long getWarn() {
return counts.get(WARN);
}
@InterfaceAudience.Private
public static long getInfo() {
return counts.get(INFO);
}
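  /*
   * Sketch: enabling the counter from log4j.properties (appender names are
   * illustrative; a "console" appender is assumed to be defined elsewhere),
   * after which the static getters above report running totals:
   *
   *   log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
   *   log4j.rootLogger=INFO,EventCounter,console
   *
   *   long errorsSoFar = EventCounter.getError();
   */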
@Override
public void append(LoggingEvent event) {
Level level = event.getLevel();
// depends on the api, == might not work
// see HADOOP-7055 for details
if (level.equals(Level.INFO)) {
counts.incr(INFO);
}
else if (level.equals(Level.WARN)) {
counts.incr(WARN);
}
else if (level.equals(Level.ERROR)) {
counts.incr(ERROR);
}
else if (level.equals(Level.FATAL)) {
counts.incr(FATAL);
}
}
@Override
public void close() {
}
@Override
public boolean requiresLayout() {
return false;
}
}
| 2,744 | 26.178218 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdminProtocolTranslatorPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.tracing.TraceAdminPB.AddSpanReceiverRequestProto;
import org.apache.hadoop.tracing.TraceAdminPB.AddSpanReceiverResponseProto;
import org.apache.hadoop.tracing.TraceAdminPB.ListSpanReceiversRequestProto;
import org.apache.hadoop.tracing.TraceAdminPB.ListSpanReceiversResponseProto;
import org.apache.hadoop.tracing.TraceAdminPB.ConfigPair;
import org.apache.hadoop.tracing.TraceAdminPB.RemoveSpanReceiverRequestProto;
import org.apache.hadoop.tracing.TraceAdminPB.SpanReceiverListInfo;
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
import com.google.protobuf.ServiceException;
@InterfaceAudience.Private
public class TraceAdminProtocolTranslatorPB implements
TraceAdminProtocol, ProtocolTranslator, Closeable {
private final TraceAdminProtocolPB rpcProxy;
public TraceAdminProtocolTranslatorPB(TraceAdminProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);
}
@Override
public SpanReceiverInfo[] listSpanReceivers() throws IOException {
ArrayList<SpanReceiverInfo> infos = new ArrayList<SpanReceiverInfo>(1);
try {
ListSpanReceiversRequestProto req =
ListSpanReceiversRequestProto.newBuilder().build();
ListSpanReceiversResponseProto resp =
rpcProxy.listSpanReceivers(null, req);
for (SpanReceiverListInfo info : resp.getDescriptionsList()) {
infos.add(new SpanReceiverInfo(info.getId(), info.getClassName()));
}
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
return infos.toArray(new SpanReceiverInfo[infos.size()]);
}
@Override
public long addSpanReceiver(SpanReceiverInfo info) throws IOException {
try {
AddSpanReceiverRequestProto.Builder bld =
AddSpanReceiverRequestProto.newBuilder();
bld.setClassName(info.getClassName());
for (ConfigurationPair configPair : info.configPairs) {
ConfigPair tuple = ConfigPair.newBuilder().
setKey(configPair.getKey()).
setValue(configPair.getValue()).build();
bld.addConfig(tuple);
}
AddSpanReceiverResponseProto resp =
rpcProxy.addSpanReceiver(null, bld.build());
return resp.getId();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void removeSpanReceiver(long spanReceiverId) throws IOException {
try {
RemoveSpanReceiverRequestProto req =
RemoveSpanReceiverRequestProto.newBuilder()
.setId(spanReceiverId).build();
rpcProxy.removeSpanReceiver(null, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public Object getUnderlyingProxyObject() {
return rpcProxy;
}
}
| 3,968 | 36.443396 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdminProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.retry.AtMostOnce;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol interface that provides tracing.
*/
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface TraceAdminProtocol {
public static final long versionID = 1L;
/**
* List the currently active trace span receivers.
*
* @throws IOException On error.
*/
@Idempotent
public SpanReceiverInfo[] listSpanReceivers() throws IOException;
/**
* Add a new trace span receiver.
*
* @param desc The span receiver description.
* @return The ID of the new trace span receiver.
*
* @throws IOException On error.
*/
@AtMostOnce
public long addSpanReceiver(SpanReceiverInfo desc) throws IOException;
/**
* Remove a trace span receiver.
*
* @param spanReceiverId The id of the span receiver to remove.
* @throws IOException On error.
*/
@AtMostOnce
public void removeSpanReceiver(long spanReceiverId) throws IOException;
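  // Added note (hedged): @Idempotent marks an operation the RPC retry
  // machinery may simply re-invoke, while @AtMostOnce marks operations whose
  // effects must not be applied twice, so a retried call must be answered
  // from saved server-side state rather than blindly re-executed.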
}
| 2,342 | 32.471429 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.TableListing;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
/**
* A command-line tool for viewing and modifying tracing settings.
*/
@InterfaceAudience.Private
public class TraceAdmin extends Configured implements Tool {
private TraceAdminProtocolPB proxy;
private TraceAdminProtocolTranslatorPB remote;
private void usage() {
PrintStream err = System.err;
err.print(
"Hadoop tracing configuration commands:\n" +
" -add [-class classname] [-Ckey=value] [-Ckey2=value2] ...\n" +
" Add a span receiver with the provided class name. Configuration\n" +
" keys for the span receiver can be specified with the -C options.\n" +
" The span receiver will also inherit whatever configuration keys\n" +
" exist in the daemon's configuration.\n" +
" -help: Print this help message.\n" +
" -host [hostname:port]\n" +
" Specify the hostname and port of the daemon to examine.\n" +
" Required for all commands.\n" +
" -list: List the current span receivers.\n" +
" -remove [id]\n" +
" Remove the span receiver with the specified id. Use -list to\n" +
" find the id of each receiver.\n"
);
}
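  // Hedged examples (host:port and values are illustrative):
  //
  //   hadoop trace -list -host 192.168.54.6:9000
  //   hadoop trace -add -class org.apache.htrace.impl.LocalFileSpanReceiver \
  //       -Chadoop.htrace.local-file-span-receiver.path=/tmp/trace.out \
  //       -host 192.168.54.6:9000
  //   hadoop trace -remove 1 -host 192.168.54.6:9000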
private int listSpanReceivers(List<String> args) throws IOException {
    SpanReceiverInfo[] infos = remote.listSpanReceivers();
if (infos.length == 0) {
System.out.println("[no span receivers found]");
return 0;
}
TableListing listing = new TableListing.Builder().
addField("ID").
addField("CLASS").
showHeaders().
build();
for (SpanReceiverInfo info : infos) {
listing.addRow("" + info.getId(), info.getClassName());
}
System.out.println(listing.toString());
return 0;
}
private final static String CONFIG_PREFIX = "-C";
private int addSpanReceiver(List<String> args) throws IOException {
String className = StringUtils.popOptionWithArgument("-class", args);
if (className == null) {
System.err.println("You must specify the classname with -class.");
return 1;
}
ByteArrayOutputStream configStream = new ByteArrayOutputStream();
PrintStream configsOut = new PrintStream(configStream, false, "UTF-8");
SpanReceiverInfoBuilder factory = new SpanReceiverInfoBuilder(className);
String prefix = "";
for (int i = 0; i < args.size(); ++i) {
String str = args.get(i);
if (!str.startsWith(CONFIG_PREFIX)) {
System.err.println("Can't understand argument: " + str);
return 1;
}
str = str.substring(CONFIG_PREFIX.length());
int equalsIndex = str.indexOf("=");
if (equalsIndex < 0) {
System.err.println("Can't parse configuration argument " + str);
System.err.println("Arguments must be in the form key=value");
return 1;
}
String key = str.substring(0, equalsIndex);
String value = str.substring(equalsIndex + 1);
factory.addConfigurationPair(key, value);
configsOut.print(prefix + key + " = " + value);
prefix = ", ";
}
String configStreamStr = configStream.toString("UTF-8");
try {
long id = remote.addSpanReceiver(factory.build());
System.out.println("Added trace span receiver " + id +
" with configuration " + configStreamStr);
} catch (IOException e) {
System.out.println("addSpanReceiver error with configuration " +
configStreamStr);
throw e;
}
return 0;
}
private int removeSpanReceiver(List<String> args) throws IOException {
String indexStr = StringUtils.popFirstNonOption(args);
long id = -1;
try {
id = Long.parseLong(indexStr);
} catch (NumberFormatException e) {
System.err.println("Failed to parse ID string " +
indexStr + ": " + e.getMessage());
return 1;
}
remote.removeSpanReceiver(id);
System.err.println("Removed trace span receiver " + id);
return 0;
}
@Override
  public int run(String[] argv) throws Exception {
LinkedList<String> args = new LinkedList<String>();
for (String arg : argv) {
args.add(arg);
}
if (StringUtils.popOption("-h", args) ||
StringUtils.popOption("-help", args)) {
usage();
return 0;
} else if (args.size() == 0) {
usage();
return 0;
}
String hostPort = StringUtils.popOptionWithArgument("-host", args);
if (hostPort == null) {
System.err.println("You must specify a host with -host.");
return 1;
}
    if (args.size() == 0) {
System.err.println("You must specify an operation.");
return 1;
}
RPC.setProtocolEngine(getConf(), TraceAdminProtocolPB.class,
ProtobufRpcEngine.class);
InetSocketAddress address = NetUtils.createSocketAddr(hostPort);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
Class<?> xface = TraceAdminProtocolPB.class;
proxy = (TraceAdminProtocolPB)RPC.getProxy(xface,
RPC.getProtocolVersion(xface), address,
ugi, getConf(), NetUtils.getDefaultSocketFactory(getConf()), 0);
remote = new TraceAdminProtocolTranslatorPB(proxy);
try {
if (args.get(0).equals("-list")) {
return listSpanReceivers(args.subList(1, args.size()));
} else if (args.get(0).equals("-add")) {
return addSpanReceiver(args.subList(1, args.size()));
} else if (args.get(0).equals("-remove")) {
return removeSpanReceiver(args.subList(1, args.size()));
} else {
System.err.println("Unrecognized tracing command: " + args.get(0));
System.err.println("Use -help for help.");
return 1;
}
} finally {
remote.close();
}
}
public static void main(String[] argv) throws Exception {
TraceAdmin admin = new TraceAdmin();
admin.setConf(new Configuration());
System.exit(admin.run(argv));
}
}
| 7,461 | 36.124378 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.htrace.SpanReceiver;
import org.apache.htrace.SpanReceiverBuilder;
import org.apache.htrace.Trace;
import org.apache.htrace.impl.LocalFileSpanReceiver;
/**
* This class provides functions for reading the names of SpanReceivers from
* the Hadoop configuration, adding those SpanReceivers to the Tracer,
* and closing those SpanReceivers when appropriate.
 * This class does nothing if no SpanReceiver is configured.
*/
@InterfaceAudience.Private
public class SpanReceiverHost implements TraceAdminProtocol {
public static final String SPAN_RECEIVERS_CONF_SUFFIX =
"spanreceiver.classes";
private static final Log LOG = LogFactory.getLog(SpanReceiverHost.class);
private static final HashMap<String, SpanReceiverHost> hosts =
new HashMap<String, SpanReceiverHost>(1);
private final TreeMap<Long, SpanReceiver> receivers =
new TreeMap<Long, SpanReceiver>();
private final String confPrefix;
private Configuration config;
private boolean closed = false;
private long highestId = 1;
private final static String LOCAL_FILE_SPAN_RECEIVER_PATH_SUFFIX =
"local-file-span-receiver.path";
public static SpanReceiverHost get(Configuration conf, String confPrefix) {
synchronized (SpanReceiverHost.class) {
SpanReceiverHost host = hosts.get(confPrefix);
if (host != null) {
return host;
}
final SpanReceiverHost newHost = new SpanReceiverHost(confPrefix);
newHost.loadSpanReceivers(conf);
ShutdownHookManager.get().addShutdownHook(new Runnable() {
public void run() {
newHost.closeReceivers();
}
}, 0);
hosts.put(confPrefix, newHost);
return newHost;
}
}
private static List<ConfigurationPair> EMPTY = Collections.emptyList();
private SpanReceiverHost(String confPrefix) {
this.confPrefix = confPrefix;
}
/**
* Reads the names of classes specified in the
* "hadoop.htrace.spanreceiver.classes" property and instantiates and registers
 * them with the Tracer as SpanReceivers.
*
* The nullary constructor is called during construction, but if the classes
* specified implement the Configurable interface, setConfiguration() will be
* called on them. This allows SpanReceivers to use values from the Hadoop
* configuration.
*/
public synchronized void loadSpanReceivers(Configuration conf) {
config = new Configuration(conf);
String receiverKey = confPrefix + SPAN_RECEIVERS_CONF_SUFFIX;
String[] receiverNames = config.getTrimmedStrings(receiverKey);
if (receiverNames == null || receiverNames.length == 0) {
if (LOG.isTraceEnabled()) {
LOG.trace("No span receiver names found in " + receiverKey + ".");
}
return;
}
// It's convenient to have each daemon log to a random trace file when
// testing.
String pathKey = confPrefix + LOCAL_FILE_SPAN_RECEIVER_PATH_SUFFIX;
if (config.get(pathKey) == null) {
String uniqueFile = LocalFileSpanReceiver.getUniqueLocalTraceFileName();
config.set(pathKey, uniqueFile);
if (LOG.isTraceEnabled()) {
LOG.trace("Set " + pathKey + " to " + uniqueFile);
}
}
for (String className : receiverNames) {
try {
SpanReceiver rcvr = loadInstance(className, EMPTY);
Trace.addReceiver(rcvr);
receivers.put(highestId++, rcvr);
LOG.info("Loaded SpanReceiver " + className + " successfully.");
} catch (IOException e) {
LOG.error("Failed to load SpanReceiver", e);
}
}
}
private synchronized SpanReceiver loadInstance(String className,
List<ConfigurationPair> extraConfig) throws IOException {
SpanReceiverBuilder builder =
new SpanReceiverBuilder(TraceUtils.
wrapHadoopConf(confPrefix, config, extraConfig));
SpanReceiver rcvr = builder.spanReceiverClass(className.trim()).build();
if (rcvr == null) {
throw new IOException("Failed to load SpanReceiver " + className);
}
return rcvr;
}
/**
* Calls close() on all SpanReceivers created by this SpanReceiverHost.
*/
public synchronized void closeReceivers() {
if (closed) return;
closed = true;
for (SpanReceiver rcvr : receivers.values()) {
try {
rcvr.close();
} catch (IOException e) {
LOG.warn("Unable to close SpanReceiver correctly: " + e.getMessage(), e);
}
}
receivers.clear();
}
public synchronized SpanReceiverInfo[] listSpanReceivers()
throws IOException {
SpanReceiverInfo[] info = new SpanReceiverInfo[receivers.size()];
int i = 0;
    for (Map.Entry<Long, SpanReceiver> entry : receivers.entrySet()) {
info[i] = new SpanReceiverInfo(entry.getKey(),
entry.getValue().getClass().getName());
i++;
}
return info;
}
public synchronized long addSpanReceiver(SpanReceiverInfo info)
throws IOException {
StringBuilder configStringBuilder = new StringBuilder();
String prefix = "";
for (ConfigurationPair pair : info.configPairs) {
configStringBuilder.append(prefix).append(pair.getKey()).
append(" = ").append(pair.getValue());
prefix = ", ";
}
SpanReceiver rcvr = null;
try {
rcvr = loadInstance(info.getClassName(), info.configPairs);
} catch (IOException e) {
LOG.info("Failed to add SpanReceiver " + info.getClassName() +
" with configuration " + configStringBuilder.toString(), e);
throw e;
} catch (RuntimeException e) {
LOG.info("Failed to add SpanReceiver " + info.getClassName() +
" with configuration " + configStringBuilder.toString(), e);
throw e;
}
Trace.addReceiver(rcvr);
long newId = highestId++;
receivers.put(newId, rcvr);
LOG.info("Successfully added SpanReceiver " + info.getClassName() +
" with configuration " + configStringBuilder.toString());
return newId;
}
public synchronized void removeSpanReceiver(long spanReceiverId)
throws IOException {
SpanReceiver rcvr = receivers.remove(spanReceiverId);
if (rcvr == null) {
throw new IOException("There is no span receiver with id " + spanReceiverId);
}
Trace.removeReceiver(rcvr);
rcvr.close();
LOG.info("Successfully removed SpanReceiver " + spanReceiverId +
" with class " + rcvr.getClass().getName());
}
}
| 7,701 | 35.851675 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SpanReceiverInfo {
private final long id;
private final String className;
final List<ConfigurationPair> configPairs =
new LinkedList<ConfigurationPair>();
static class ConfigurationPair {
private final String key;
private final String value;
ConfigurationPair(String key, String value) {
this.key = key;
this.value = value;
}
public String getKey() {
return key;
}
public String getValue() {
return value;
}
}
SpanReceiverInfo(long id, String className) {
this.id = id;
this.className = className;
}
public long getId() {
return id;
}
public String getClassName() {
return className;
}
}
| 1,777 | 26.353846 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdminProtocolServerSideTranslatorPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.retry.AtMostOnce;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.tracing.TraceAdminPB.AddSpanReceiverRequestProto;
import org.apache.hadoop.tracing.TraceAdminPB.AddSpanReceiverResponseProto;
import org.apache.hadoop.tracing.TraceAdminPB.ListSpanReceiversRequestProto;
import org.apache.hadoop.tracing.TraceAdminPB.ListSpanReceiversResponseProto;
import org.apache.hadoop.tracing.TraceAdminPB.ConfigPair;
import org.apache.hadoop.tracing.TraceAdminPB.RemoveSpanReceiverRequestProto;
import org.apache.hadoop.tracing.TraceAdminPB.RemoveSpanReceiverResponseProto;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@InterfaceAudience.Private
public class TraceAdminProtocolServerSideTranslatorPB
implements TraceAdminProtocolPB, Closeable {
private final TraceAdminProtocol server;
public TraceAdminProtocolServerSideTranslatorPB(TraceAdminProtocol server) {
this.server = server;
}
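  // Added note (hedged): this is the server-side half of the protobuf
  // translation pattern; the client-side counterpart,
  // TraceAdminProtocolTranslatorPB, performs the inverse conversion on the
  // caller's side of the RPC boundary.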
@Override
public void close() throws IOException {
RPC.stopProxy(server);
}
@Override
public ListSpanReceiversResponseProto listSpanReceivers(
RpcController controller, ListSpanReceiversRequestProto req)
throws ServiceException {
try {
SpanReceiverInfo[] descs = server.listSpanReceivers();
ListSpanReceiversResponseProto.Builder bld =
ListSpanReceiversResponseProto.newBuilder();
for (int i = 0; i < descs.length; ++i) {
bld.addDescriptions(TraceAdminPB.SpanReceiverListInfo.newBuilder().
setId(descs[i].getId()).
setClassName(descs[i].getClassName()).build());
}
return bld.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public AddSpanReceiverResponseProto addSpanReceiver(
RpcController controller, AddSpanReceiverRequestProto req)
throws ServiceException {
try {
SpanReceiverInfoBuilder factory =
new SpanReceiverInfoBuilder(req.getClassName());
for (ConfigPair config : req.getConfigList()) {
factory.addConfigurationPair(config.getKey(), config.getValue());
}
long id = server.addSpanReceiver(factory.build());
return AddSpanReceiverResponseProto.newBuilder().setId(id).build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public RemoveSpanReceiverResponseProto removeSpanReceiver(
RpcController controller, RemoveSpanReceiverRequestProto req)
throws ServiceException {
try {
server.removeSpanReceiver(req.getId());
return RemoveSpanReceiverResponseProto.getDefaultInstance();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return TraceAdminProtocol.versionID;
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
if (!protocol.equals(RPC.getProtocolName(TraceAdminProtocolPB.class))) {
throw new IOException("Serverside implements " +
RPC.getProtocolName(TraceAdminProtocolPB.class) +
". The following requested protocol is unknown: " + protocol);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
RPC.getProtocolVersion(TraceAdminProtocolPB.class),
TraceAdminProtocolPB.class);
}
}
| 4,546 | 37.210084 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
import org.apache.htrace.HTraceConfiguration;
/**
* This class provides utility functions for tracing.
*/
@InterfaceAudience.Private
public class TraceUtils {
private static List<ConfigurationPair> EMPTY = Collections.emptyList();
public static HTraceConfiguration wrapHadoopConf(final String prefix,
final Configuration conf) {
return wrapHadoopConf(prefix, conf, EMPTY);
}
public static HTraceConfiguration wrapHadoopConf(final String prefix,
final Configuration conf, List<ConfigurationPair> extraConfig) {
final HashMap<String, String> extraMap = new HashMap<String, String>();
for (ConfigurationPair pair : extraConfig) {
extraMap.put(pair.getKey(), pair.getValue());
}
return new HTraceConfiguration() {
@Override
public String get(String key) {
return get(key, "");
}
@Override
public String get(String key, String defaultValue) {
String prefixedKey = prefix + key;
if (extraMap.containsKey(prefixedKey)) {
return extraMap.get(prefixedKey);
}
return conf.get(prefixedKey, defaultValue);
}
};
}
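  // Hedged example (key names assumed): with prefix "hadoop.htrace.", a call
  // to get("local-file-span-receiver.path") looks up
  // "hadoop.htrace.local-file-span-receiver.path", preferring a matching
  // extraConfig pair before falling back to the wrapped Configuration.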
}
| 2,247 | 34.125 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverInfoBuilder.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SpanReceiverInfoBuilder {
private SpanReceiverInfo info;
public SpanReceiverInfoBuilder(String className) {
info = new SpanReceiverInfo(0, className);
}
public void addConfigurationPair(String key, String value) {
info.configPairs.add(new ConfigurationPair(key, value));
}
public SpanReceiverInfo build() {
SpanReceiverInfo ret = info;
info = null;
return ret;
}
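  // Hedged usage sketch (class name and key are illustrative):
  //
  //   SpanReceiverInfoBuilder bld = new SpanReceiverInfoBuilder(
  //       "org.apache.htrace.impl.LocalFileSpanReceiver");
  //   bld.addConfigurationPair(
  //       "hadoop.htrace.local-file-span-receiver.path", "/tmp/trace.out");
  //   SpanReceiverInfo info = bld.build();  // the builder is single-use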
}
| 1,500 | 33.906977 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdminProtocolPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.tracing.TraceAdminPB.TraceAdminService;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@ProtocolInfo(
protocolName = "org.apache.hadoop.tracing.TraceAdminPB.TraceAdminService",
protocolVersion = 1)
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface TraceAdminProtocolPB extends
TraceAdminService.BlockingInterface, VersionedProtocol {
}
| 1,602 | 40.102564 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HealthCheckFailedException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.util.Daemon;
import com.google.common.base.Preconditions;
/**
* This class is a daemon which runs in a loop, periodically heartbeating
* with an HA service. It is responsible for keeping track of that service's
* health and exposing callbacks to the failover controller when the health
* status changes.
*
* Classes which need callbacks should implement the {@link Callback}
* interface.
*/
@InterfaceAudience.Private
public class HealthMonitor {
private static final Log LOG = LogFactory.getLog(
HealthMonitor.class);
private Daemon daemon;
private long connectRetryInterval;
private long checkIntervalMillis;
private long sleepAfterDisconnectMillis;
private int rpcTimeout;
private volatile boolean shouldRun = true;
/** The connected proxy */
private HAServiceProtocol proxy;
/** The HA service to monitor */
private final HAServiceTarget targetToMonitor;
private final Configuration conf;
private State state = State.INITIALIZING;
/**
* Listeners for state changes
*/
private List<Callback> callbacks = Collections.synchronizedList(
new LinkedList<Callback>());
private List<ServiceStateCallback> serviceStateCallbacks = Collections
.synchronizedList(new LinkedList<ServiceStateCallback>());
private HAServiceStatus lastServiceState = new HAServiceStatus(
HAServiceState.INITIALIZING);
@InterfaceAudience.Private
public enum State {
/**
* The health monitor is still starting up.
*/
INITIALIZING,
/**
* The service is not responding to health check RPCs.
*/
SERVICE_NOT_RESPONDING,
/**
* The service is connected and healthy.
*/
SERVICE_HEALTHY,
/**
* The service is running but unhealthy.
*/
SERVICE_UNHEALTHY,
/**
* The health monitor itself failed unrecoverably and can
* no longer provide accurate information.
*/
HEALTH_MONITOR_FAILED;
}
HealthMonitor(Configuration conf, HAServiceTarget target) {
this.targetToMonitor = target;
this.conf = conf;
this.sleepAfterDisconnectMillis = conf.getLong(
HA_HM_SLEEP_AFTER_DISCONNECT_KEY,
HA_HM_SLEEP_AFTER_DISCONNECT_DEFAULT);
this.checkIntervalMillis = conf.getLong(
HA_HM_CHECK_INTERVAL_KEY,
HA_HM_CHECK_INTERVAL_DEFAULT);
this.connectRetryInterval = conf.getLong(
HA_HM_CONNECT_RETRY_INTERVAL_KEY,
HA_HM_CONNECT_RETRY_INTERVAL_DEFAULT);
this.rpcTimeout = conf.getInt(
HA_HM_RPC_TIMEOUT_KEY,
HA_HM_RPC_TIMEOUT_DEFAULT);
this.daemon = new MonitorDaemon();
}
public void addCallback(Callback cb) {
this.callbacks.add(cb);
}
public void removeCallback(Callback cb) {
callbacks.remove(cb);
}
public synchronized void addServiceStateCallback(ServiceStateCallback cb) {
this.serviceStateCallbacks.add(cb);
}
public synchronized void removeServiceStateCallback(ServiceStateCallback cb) {
serviceStateCallbacks.remove(cb);
}
public void shutdown() {
LOG.info("Stopping HealthMonitor thread");
shouldRun = false;
daemon.interrupt();
}
/**
* @return the current proxy object to the underlying service.
* Note that this may return null in the case that the service
* is not responding. Also note that, even if the last indicated
* state is healthy, the service may have gone down in the meantime.
*/
public synchronized HAServiceProtocol getProxy() {
return proxy;
}
private void loopUntilConnected() throws InterruptedException {
tryConnect();
while (proxy == null) {
Thread.sleep(connectRetryInterval);
tryConnect();
}
assert proxy != null;
}
private void tryConnect() {
Preconditions.checkState(proxy == null);
try {
synchronized (this) {
proxy = createProxy();
}
} catch (IOException e) {
LOG.warn("Could not connect to local service at " + targetToMonitor +
": " + e.getMessage());
proxy = null;
enterState(State.SERVICE_NOT_RESPONDING);
}
}
/**
* Connect to the service to be monitored. Stubbed out for easier testing.
*/
protected HAServiceProtocol createProxy() throws IOException {
return targetToMonitor.getProxy(conf, rpcTimeout);
}
private void doHealthChecks() throws InterruptedException {
while (shouldRun) {
HAServiceStatus status = null;
boolean healthy = false;
try {
status = proxy.getServiceStatus();
proxy.monitorHealth();
healthy = true;
} catch (Throwable t) {
if (isHealthCheckFailedException(t)) {
LOG.warn("Service health check failed for " + targetToMonitor
+ ": " + t.getMessage());
enterState(State.SERVICE_UNHEALTHY);
} else {
LOG.warn("Transport-level exception trying to monitor health of " +
targetToMonitor + ": " + t.getCause() + " " + t.getLocalizedMessage());
RPC.stopProxy(proxy);
proxy = null;
enterState(State.SERVICE_NOT_RESPONDING);
Thread.sleep(sleepAfterDisconnectMillis);
return;
}
}
if (status != null) {
setLastServiceStatus(status);
}
if (healthy) {
enterState(State.SERVICE_HEALTHY);
}
Thread.sleep(checkIntervalMillis);
}
}
private boolean isHealthCheckFailedException(Throwable t) {
return ((t instanceof HealthCheckFailedException) ||
(t instanceof RemoteException &&
((RemoteException)t).unwrapRemoteException(
HealthCheckFailedException.class) instanceof
HealthCheckFailedException));
}
private synchronized void setLastServiceStatus(HAServiceStatus status) {
this.lastServiceState = status;
for (ServiceStateCallback cb : serviceStateCallbacks) {
cb.reportServiceStatus(lastServiceState);
}
}
private synchronized void enterState(State newState) {
if (newState != state) {
LOG.info("Entering state " + newState);
state = newState;
synchronized (callbacks) {
for (Callback cb : callbacks) {
cb.enteredState(newState);
}
}
}
}
synchronized State getHealthState() {
return state;
}
synchronized HAServiceStatus getLastServiceStatus() {
return lastServiceState;
}
boolean isAlive() {
return daemon.isAlive();
}
void join() throws InterruptedException {
daemon.join();
}
void start() {
daemon.start();
}
private class MonitorDaemon extends Daemon {
private MonitorDaemon() {
super();
setName("Health Monitor for " + targetToMonitor);
setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
LOG.fatal("Health monitor failed", e);
enterState(HealthMonitor.State.HEALTH_MONITOR_FAILED);
}
});
}
@Override
public void run() {
while (shouldRun) {
try {
loopUntilConnected();
doHealthChecks();
} catch (InterruptedException ie) {
Preconditions.checkState(!shouldRun,
"Interrupted but still supposed to run");
}
}
}
}
/**
* Callback interface for state change events.
*
* This interface is called from a single thread which also performs
* the health monitoring. If the callback processing takes a long time,
* no further health checks will be made during this period, nor will
* other registered callbacks be called.
*
* If the callback itself throws an unchecked exception, no other
* callbacks following it will be called, and the health monitor
* will terminate, entering HEALTH_MONITOR_FAILED state.
*/
static interface Callback {
void enteredState(State newState);
}
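  // Hedged example (assumed, not part of the original): a minimal callback
  // registered via addCallback() that just logs state transitions:
  //
  //   monitor.addCallback(new HealthMonitor.Callback() {
  //     @Override
  //     public void enteredState(HealthMonitor.State newState) {
  //       LOG.info("health state is now " + newState);
  //     }
  //   });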
/**
* Callback interface for service states.
*/
static interface ServiceStateCallback {
void reportServiceStatus(HAServiceStatus status);
}
}
| 9,542 | 28.006079 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException;
import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.util.ZKUtil;
import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
import org.apache.hadoop.ha.HealthMonitor.State;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.hadoop.util.ToolRunner;
import org.apache.zookeeper.data.ACL;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
@InterfaceAudience.LimitedPrivate("HDFS")
public abstract class ZKFailoverController {
static final Log LOG = LogFactory.getLog(ZKFailoverController.class);
public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum";
private static final String ZK_SESSION_TIMEOUT_KEY = "ha.zookeeper.session-timeout.ms";
private static final int ZK_SESSION_TIMEOUT_DEFAULT = 5*1000;
private static final String ZK_PARENT_ZNODE_KEY = "ha.zookeeper.parent-znode";
public static final String ZK_ACL_KEY = "ha.zookeeper.acl";
private static final String ZK_ACL_DEFAULT = "world:anyone:rwcda";
public static final String ZK_AUTH_KEY = "ha.zookeeper.auth";
static final String ZK_PARENT_ZNODE_DEFAULT = "/hadoop-ha";
/**
* All of the conf keys used by the ZKFC. This is used in order to allow
* them to be overridden on a per-nameservice or per-namenode basis.
*/
protected static final String[] ZKFC_CONF_KEYS = new String[] {
ZK_QUORUM_KEY,
ZK_SESSION_TIMEOUT_KEY,
ZK_PARENT_ZNODE_KEY,
ZK_ACL_KEY,
ZK_AUTH_KEY
};
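  // Hedged configuration sketch (host names are illustrative): a typical
  // deployment sets, e.g. in core-site.xml:
  //
  //   ha.zookeeper.quorum = zk1.example.com:2181,zk2.example.com:2181
  //   ha.zookeeper.session-timeout.ms = 5000
  //   ha.zookeeper.parent-znode = /hadoop-ha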
protected static final String USAGE =
"Usage: hdfs zkfc [ -formatZK [-force] [-nonInteractive] ]";
/** Unable to format the parent znode in ZK */
static final int ERR_CODE_FORMAT_DENIED = 2;
/** The parent znode doesn't exist in ZK */
static final int ERR_CODE_NO_PARENT_ZNODE = 3;
/** Fencing is not properly configured */
static final int ERR_CODE_NO_FENCER = 4;
/** Automatic failover is not enabled */
static final int ERR_CODE_AUTO_FAILOVER_NOT_ENABLED = 5;
/** Cannot connect to ZooKeeper */
static final int ERR_CODE_NO_ZK = 6;
protected Configuration conf;
private String zkQuorum;
protected final HAServiceTarget localTarget;
private HealthMonitor healthMonitor;
private ActiveStandbyElector elector;
protected ZKFCRpcServer rpcServer;
private State lastHealthState = State.INITIALIZING;
private volatile HAServiceState serviceState = HAServiceState.INITIALIZING;
/** Set if a fatal error occurs */
private String fatalError = null;
/**
* A future nanotime before which the ZKFC will not join the election.
* This is used during graceful failover.
*/
private long delayJoiningUntilNanotime = 0;
/** Executor on which {@link #scheduleRecheck(long)} schedules events */
private ScheduledExecutorService delayExecutor =
Executors.newScheduledThreadPool(1,
new ThreadFactoryBuilder().setDaemon(true)
.setNameFormat("ZKFC Delay timer #%d")
.build());
private ActiveAttemptRecord lastActiveAttemptRecord;
private Object activeAttemptRecordLock = new Object();
protected ZKFailoverController(Configuration conf, HAServiceTarget localTarget) {
this.localTarget = localTarget;
this.conf = conf;
}
protected abstract byte[] targetToData(HAServiceTarget target);
protected abstract HAServiceTarget dataToTarget(byte[] data);
protected abstract void loginAsFCUser() throws IOException;
protected abstract void checkRpcAdminAccess()
throws AccessControlException, IOException;
protected abstract InetSocketAddress getRpcAddressToBindTo();
protected abstract PolicyProvider getPolicyProvider();
/**
* Return the name of a znode inside the configured parent znode in which
* the ZKFC will do all of its work. This is so that multiple federated
* nameservices can run on the same ZK quorum without having to manually
* configure them to separate subdirectories.
*/
protected abstract String getScopeInsideParentNode();
public HAServiceTarget getLocalTarget() {
return localTarget;
}
HAServiceState getServiceState() { return serviceState; }
public int run(final String[] args) throws Exception {
if (!localTarget.isAutoFailoverEnabled()) {
LOG.fatal("Automatic failover is not enabled for " + localTarget + "." +
" Please ensure that automatic failover is enabled in the " +
"configuration before running the ZK failover controller.");
return ERR_CODE_AUTO_FAILOVER_NOT_ENABLED;
}
loginAsFCUser();
try {
return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
@Override
public Integer run() {
try {
return doRun(args);
} catch (Exception t) {
throw new RuntimeException(t);
} finally {
if (elector != null) {
elector.terminateConnection();
}
}
}
});
} catch (RuntimeException rte) {
LOG.fatal("The failover controller encounters runtime error: " + rte);
throw (Exception)rte.getCause();
}
}
private int doRun(String[] args)
throws HadoopIllegalArgumentException, IOException, InterruptedException {
try {
initZK();
} catch (KeeperException ke) {
LOG.fatal("Unable to start failover controller. Unable to connect "
+ "to ZooKeeper quorum at " + zkQuorum + ". Please check the "
+ "configured value for " + ZK_QUORUM_KEY + " and ensure that "
+ "ZooKeeper is running.");
return ERR_CODE_NO_ZK;
}
if (args.length > 0) {
if ("-formatZK".equals(args[0])) {
boolean force = false;
boolean interactive = true;
for (int i = 1; i < args.length; i++) {
if ("-force".equals(args[i])) {
force = true;
} else if ("-nonInteractive".equals(args[i])) {
interactive = false;
} else {
badArg(args[i]);
}
}
return formatZK(force, interactive);
} else {
badArg(args[0]);
}
}
if (!elector.parentZNodeExists()) {
LOG.fatal("Unable to start failover controller. "
+ "Parent znode does not exist.\n"
+ "Run with -formatZK flag to initialize ZooKeeper.");
return ERR_CODE_NO_PARENT_ZNODE;
}
try {
localTarget.checkFencingConfigured();
} catch (BadFencingConfigurationException e) {
LOG.fatal("Fencing is not configured for " + localTarget + ".\n" +
"You must configure a fencing method before using automatic " +
"failover.", e);
return ERR_CODE_NO_FENCER;
}
initRPC();
initHM();
startRPC();
try {
mainLoop();
} finally {
rpcServer.stopAndJoin();
elector.quitElection(true);
healthMonitor.shutdown();
healthMonitor.join();
}
return 0;
}
private void badArg(String arg) {
printUsage();
throw new HadoopIllegalArgumentException(
"Bad argument: " + arg);
}
private void printUsage() {
System.err.println(USAGE + "\n");
}
private int formatZK(boolean force, boolean interactive)
throws IOException, InterruptedException {
if (elector.parentZNodeExists()) {
if (!force && (!interactive || !confirmFormat())) {
return ERR_CODE_FORMAT_DENIED;
}
try {
elector.clearParentZNode();
} catch (IOException e) {
LOG.error("Unable to clear zk parent znode", e);
return 1;
}
}
elector.ensureParentZNode();
return 0;
}
private boolean confirmFormat() {
String parentZnode = getParentZnode();
System.err.println(
"===============================================\n" +
"The configured parent znode " + parentZnode + " already exists.\n" +
"Are you sure you want to clear all failover information from\n" +
"ZooKeeper?\n" +
"WARNING: Before proceeding, ensure that all HDFS services and\n" +
"failover controllers are stopped!\n" +
"===============================================");
try {
return ToolRunner.confirmPrompt("Proceed formatting " + parentZnode + "?");
} catch (IOException e) {
LOG.debug("Failed to confirm", e);
return false;
}
}
// ------------------------------------------
// Begin actual guts of failover controller
// ------------------------------------------
private void initHM() {
healthMonitor = new HealthMonitor(conf, localTarget);
healthMonitor.addCallback(new HealthCallbacks());
healthMonitor.addServiceStateCallback(new ServiceStateCallBacks());
healthMonitor.start();
}
protected void initRPC() throws IOException {
InetSocketAddress bindAddr = getRpcAddressToBindTo();
rpcServer = new ZKFCRpcServer(conf, bindAddr, this, getPolicyProvider());
}
protected void startRPC() throws IOException {
rpcServer.start();
}
private void initZK() throws HadoopIllegalArgumentException, IOException,
KeeperException {
zkQuorum = conf.get(ZK_QUORUM_KEY);
int zkTimeout = conf.getInt(ZK_SESSION_TIMEOUT_KEY,
ZK_SESSION_TIMEOUT_DEFAULT);
// Parse ACLs from configuration.
String zkAclConf = conf.get(ZK_ACL_KEY, ZK_ACL_DEFAULT);
zkAclConf = ZKUtil.resolveConfIndirection(zkAclConf);
List<ACL> zkAcls = ZKUtil.parseACLs(zkAclConf);
if (zkAcls.isEmpty()) {
zkAcls = Ids.CREATOR_ALL_ACL;
}
// Parse authentication from configuration.
String zkAuthConf = conf.get(ZK_AUTH_KEY);
zkAuthConf = ZKUtil.resolveConfIndirection(zkAuthConf);
List<ZKAuthInfo> zkAuths;
if (zkAuthConf != null) {
zkAuths = ZKUtil.parseAuth(zkAuthConf);
} else {
zkAuths = Collections.emptyList();
}
// Sanity check configuration.
Preconditions.checkArgument(zkQuorum != null,
"Missing required configuration '%s' for ZooKeeper quorum",
ZK_QUORUM_KEY);
Preconditions.checkArgument(zkTimeout > 0,
"Invalid ZK session timeout %s", zkTimeout);
int maxRetryNum = conf.getInt(
CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_KEY,
CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT);
elector = new ActiveStandbyElector(zkQuorum,
zkTimeout, getParentZnode(), zkAcls, zkAuths,
new ElectorCallbacks(), maxRetryNum);
}
private String getParentZnode() {
String znode = conf.get(ZK_PARENT_ZNODE_KEY,
ZK_PARENT_ZNODE_DEFAULT);
if (!znode.endsWith("/")) {
znode += "/";
}
return znode + getScopeInsideParentNode();
}
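  // Hedged example: with the default parent znode "/hadoop-ha" and a scope
  // of "mycluster" (hypothetical), this resolves to "/hadoop-ha/mycluster".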
private synchronized void mainLoop() throws InterruptedException {
while (fatalError == null) {
wait();
}
assert fatalError != null; // only get here on fatal
throw new RuntimeException(
"ZK Failover Controller failed: " + fatalError);
}
private synchronized void fatalError(String err) {
LOG.fatal("Fatal error occurred:" + err);
fatalError = err;
notifyAll();
}
private synchronized void becomeActive() throws ServiceFailedException {
LOG.info("Trying to make " + localTarget + " active...");
try {
HAServiceProtocolHelper.transitionToActive(localTarget.getProxy(
conf, FailoverController.getRpcTimeoutToNewActive(conf)),
createReqInfo());
String msg = "Successfully transitioned " + localTarget +
" to active state";
LOG.info(msg);
serviceState = HAServiceState.ACTIVE;
recordActiveAttempt(new ActiveAttemptRecord(true, msg));
} catch (Throwable t) {
String msg = "Couldn't make " + localTarget + " active";
LOG.fatal(msg, t);
recordActiveAttempt(new ActiveAttemptRecord(false, msg + "\n" +
StringUtils.stringifyException(t)));
if (t instanceof ServiceFailedException) {
throw (ServiceFailedException)t;
} else {
throw new ServiceFailedException("Couldn't transition to active",
t);
}
/*
* TODO:
* we need to make sure that if we get fenced and then quickly restarted,
* none of these calls will retry across the restart boundary
* perhaps the solution is that, whenever the nn starts, it gets a unique
* ID, and when we start becoming active, we record it, and then any future
* calls use the same ID
*/
}
}
/**
* Store the results of the last attempt to become active.
* This is used so that, during manually initiated failover,
* we can report back the results of the attempt to become active
* to the initiator of the failover.
*/
private void recordActiveAttempt(
ActiveAttemptRecord record) {
synchronized (activeAttemptRecordLock) {
lastActiveAttemptRecord = record;
activeAttemptRecordLock.notifyAll();
}
}
/**
* Wait until one of the following events:
* <ul>
* <li>Another thread publishes the results of an attempt to become active
* using {@link #recordActiveAttempt(ActiveAttemptRecord)}</li>
* <li>The node enters bad health status</li>
* <li>The specified timeout elapses</li>
* </ul>
*
* @param timeoutMillis number of millis to wait
* @return the published record, or null if the timeout elapses or the
* service becomes unhealthy
* @throws InterruptedException if the thread is interrupted.
*/
private ActiveAttemptRecord waitForActiveAttempt(int timeoutMillis)
throws InterruptedException {
long st = System.nanoTime();
long waitUntil = st + TimeUnit.NANOSECONDS.convert(
timeoutMillis, TimeUnit.MILLISECONDS);
do {
// periodically check health state, because entering an
// unhealthy state could prevent us from ever attempting to
// become active. We can detect this and respond to the user
// immediately.
synchronized (this) {
if (lastHealthState != State.SERVICE_HEALTHY) {
// early out if service became unhealthy
return null;
}
}
synchronized (activeAttemptRecordLock) {
if ((lastActiveAttemptRecord != null &&
lastActiveAttemptRecord.nanoTime >= st)) {
return lastActiveAttemptRecord;
}
// Only wait 1sec so that we periodically recheck the health state
// above.
activeAttemptRecordLock.wait(1000);
}
} while (System.nanoTime() < waitUntil);
// Timeout elapsed.
LOG.warn(timeoutMillis + "ms timeout elapsed waiting for an attempt " +
"to become active");
return null;
}
private StateChangeRequestInfo createReqInfo() {
return new StateChangeRequestInfo(RequestSource.REQUEST_BY_ZKFC);
}
private synchronized void becomeStandby() {
LOG.info("ZK Election indicated that " + localTarget +
" should become standby");
try {
int timeout = FailoverController.getGracefulFenceTimeout(conf);
localTarget.getProxy(conf, timeout).transitionToStandby(createReqInfo());
LOG.info("Successfully transitioned " + localTarget +
" to standby state");
} catch (Exception e) {
LOG.error("Couldn't transition " + localTarget + " to standby state",
e);
// TODO handle this. It's a likely case since we probably got fenced
// at the same time.
}
serviceState = HAServiceState.STANDBY;
}
private synchronized void fenceOldActive(byte[] data) {
HAServiceTarget target = dataToTarget(data);
try {
doFence(target);
} catch (Throwable t) {
recordActiveAttempt(new ActiveAttemptRecord(false, "Unable to fence old active: " + StringUtils.stringifyException(t)));
Throwables.propagate(t);
}
}
private void doFence(HAServiceTarget target) {
LOG.info("Should fence: " + target);
boolean gracefulWorked = new FailoverController(conf,
RequestSource.REQUEST_BY_ZKFC).tryGracefulFence(target);
if (gracefulWorked) {
// It's possible that it's in standby but just about to go into active,
// no? Is there some race here?
LOG.info("Successfully transitioned " + target + " to standby " +
"state without fencing");
return;
}
try {
target.checkFencingConfigured();
} catch (BadFencingConfigurationException e) {
LOG.error("Couldn't fence old active " + target, e);
recordActiveAttempt(new ActiveAttemptRecord(false, "Unable to fence old active"));
throw new RuntimeException(e);
}
if (!target.getFencer().fence(target)) {
throw new RuntimeException("Unable to fence " + target);
}
}
/**
* Request from graceful failover to cede active role. Causes
* this ZKFC to transition its local node to standby, then quit
* the election for the specified period of time, after which it
* will rejoin iff it is healthy.
*/
void cedeActive(final int millisToCede)
throws AccessControlException, ServiceFailedException, IOException {
try {
UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
doCedeActive(millisToCede);
return null;
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
}
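  // Added note (hedged): a non-positive millisToCede cancels any pending
  // delay and rejoins the election immediately; doGracefulFailover() relies
  // on this by calling cedeActive(-1) once its failover attempt completes.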
private void doCedeActive(int millisToCede)
throws AccessControlException, ServiceFailedException, IOException {
int timeout = FailoverController.getGracefulFenceTimeout(conf);
// Lock elector to maintain lock ordering of elector -> ZKFC
synchronized (elector) {
synchronized (this) {
if (millisToCede <= 0) {
delayJoiningUntilNanotime = 0;
recheckElectability();
return;
}
LOG.info("Requested by " + UserGroupInformation.getCurrentUser() +
" at " + Server.getRemoteAddress() + " to cede active role.");
boolean needFence = false;
try {
localTarget.getProxy(conf, timeout).transitionToStandby(createReqInfo());
LOG.info("Successfully ensured local node is in standby mode");
} catch (IOException ioe) {
LOG.warn("Unable to transition local node to standby: " +
ioe.getLocalizedMessage());
LOG.warn("Quitting election but indicating that fencing is " +
"necessary");
needFence = true;
}
delayJoiningUntilNanotime = System.nanoTime() +
TimeUnit.MILLISECONDS.toNanos(millisToCede);
elector.quitElection(needFence);
serviceState = HAServiceState.INITIALIZING;
}
}
recheckElectability();
}
/**
* Coordinate a graceful failover to this node.
* @throws ServiceFailedException if the node fails to become active
* @throws IOException some other error occurs
*/
void gracefulFailoverToYou() throws ServiceFailedException, IOException {
try {
UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
doGracefulFailover();
return null;
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
}
/**
* Coordinate a graceful failover. This proceeds in several phases:
* 1) Pre-flight checks: ensure that the local node is healthy, and
* thus a candidate for failover.
* 2) Determine the current active node. If it is the local node, no
* need to failover - return success.
* 3) Ask that node to yield from the election for a number of seconds.
* 4) Allow the normal election path to run in other threads. Wait until
* we either become unhealthy or we see an election attempt recorded by
* the normal code path.
* 5) Allow the old active to rejoin the election, so a future
* failback is possible.
*/
private void doGracefulFailover()
throws ServiceFailedException, IOException, InterruptedException {
int timeout = FailoverController.getGracefulFenceTimeout(conf) * 2;
// Phase 1: pre-flight checks
checkEligibleForFailover();
// Phase 2: determine old/current active node. Check that we're not
// ourselves active, etc.
HAServiceTarget oldActive = getCurrentActive();
if (oldActive == null) {
// No node is currently active. So, if we aren't already
// active ourselves by means of a normal election, then there's
// probably something preventing us from becoming active.
throw new ServiceFailedException(
"No other node is currently active.");
}
if (oldActive.getAddress().equals(localTarget.getAddress())) {
LOG.info("Local node " + localTarget + " is already active. " +
"No need to failover. Returning success.");
return;
}
// Phase 3: ask the old active to yield from the election.
LOG.info("Asking " + oldActive + " to cede its active state for " +
timeout + "ms");
ZKFCProtocol oldZkfc = oldActive.getZKFCProxy(conf, timeout);
oldZkfc.cedeActive(timeout);
// Phase 4: wait for the normal election to make the local node
// active.
ActiveAttemptRecord attempt = waitForActiveAttempt(timeout + 60000);
if (attempt == null) {
// We didn't even make an attempt to become active.
synchronized(this) {
if (lastHealthState != State.SERVICE_HEALTHY) {
throw new ServiceFailedException("Unable to become active. " +
"Service became unhealthy while trying to failover.");
}
}
throw new ServiceFailedException("Unable to become active. " +
"Local node did not get an opportunity to do so from ZooKeeper, " +
"or the local node took too long to transition to active.");
}
// Phase 5. At this point, we made some attempt to become active. So we
// can tell the old active to rejoin if it wants. This allows a quick
// fail-back if we immediately crash.
oldZkfc.cedeActive(-1);
if (attempt.succeeded) {
LOG.info("Successfully became active. " + attempt.status);
} else {
// Propagate failure
String msg = "Failed to become active. " + attempt.status;
throw new ServiceFailedException(msg);
}
}
/**
* Ensure that the local node is in a healthy state, and thus
* eligible for graceful failover.
* @throws ServiceFailedException if the node is unhealthy
*/
private synchronized void checkEligibleForFailover()
throws ServiceFailedException {
// Check health
if (this.getLastHealthState() != State.SERVICE_HEALTHY) {
throw new ServiceFailedException(
localTarget + " is not currently healthy. " +
"Cannot be failover target");
}
}
/**
* @return an {@link HAServiceTarget} for the current active node
* in the cluster, or null if no node is active.
* @throws IOException if a ZK-related issue occurs
* @throws InterruptedException if thread is interrupted
*/
private HAServiceTarget getCurrentActive()
throws IOException, InterruptedException {
synchronized (elector) {
synchronized (this) {
byte[] activeData;
try {
activeData = elector.getActiveData();
} catch (ActiveNotFoundException e) {
return null;
} catch (KeeperException ke) {
throw new IOException(
"Unexpected ZooKeeper issue fetching active node info", ke);
}
HAServiceTarget oldActive = dataToTarget(activeData);
return oldActive;
}
}
}
/**
* Check the current state of the service, and join the election
* if it should be in the election.
*/
private void recheckElectability() {
// Maintain lock ordering of elector -> ZKFC
synchronized (elector) {
synchronized (this) {
boolean healthy = lastHealthState == State.SERVICE_HEALTHY;
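        // Honor any pending request to stay out of the election (set by
        // doCedeActive or verifyChangedServiceState) before joining.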
long remainingDelay = delayJoiningUntilNanotime - System.nanoTime();
if (remainingDelay > 0) {
if (healthy) {
LOG.info("Would have joined master election, but this node is " +
"prohibited from doing so for " +
TimeUnit.NANOSECONDS.toMillis(remainingDelay) + " more ms");
}
scheduleRecheck(remainingDelay);
return;
}
switch (lastHealthState) {
case SERVICE_HEALTHY:
elector.joinElection(targetToData(localTarget));
if (quitElectionOnBadState) {
quitElectionOnBadState = false;
}
break;
case INITIALIZING:
LOG.info("Ensuring that " + localTarget + " does not " +
"participate in active master election");
elector.quitElection(false);
serviceState = HAServiceState.INITIALIZING;
break;
case SERVICE_UNHEALTHY:
case SERVICE_NOT_RESPONDING:
LOG.info("Quitting master election for " + localTarget +
" and marking that fencing is necessary");
elector.quitElection(true);
serviceState = HAServiceState.INITIALIZING;
break;
case HEALTH_MONITOR_FAILED:
fatalError("Health monitor failed!");
break;
default:
          throw new IllegalArgumentException("Unhandled state: " + lastHealthState);
}
}
}
}
/**
* Schedule a call to {@link #recheckElectability()} in the future.
*/
private void scheduleRecheck(long whenNanos) {
delayExecutor.schedule(
new Runnable() {
@Override
public void run() {
try {
recheckElectability();
} catch (Throwable t) {
fatalError("Failed to recheck electability: " +
StringUtils.stringifyException(t));
}
}
},
whenNanos, TimeUnit.NANOSECONDS);
}
int serviceStateMismatchCount = 0;
boolean quitElectionOnBadState = false;
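  /**
   * Compare the HA state reported by the local service against the state
   * this ZKFC expects. A single mismatch is tolerated, since it may be a
   * transition still in flight; a repeated mismatch causes the ZKFC to
   * quit the election and mark fencing as necessary.
   */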
void verifyChangedServiceState(HAServiceState changedState) {
synchronized (elector) {
synchronized (this) {
if (serviceState == HAServiceState.INITIALIZING) {
if (quitElectionOnBadState) {
LOG.debug("rechecking for electability from bad state");
recheckElectability();
}
return;
}
if (changedState == serviceState) {
serviceStateMismatchCount = 0;
return;
}
if (serviceStateMismatchCount == 0) {
// recheck one more time. As this might be due to parallel transition.
serviceStateMismatchCount++;
return;
}
// quit the election as the expected state and reported state
// mismatches.
LOG.error("Local service " + localTarget
+ " has changed the serviceState to " + changedState
+ ". Expected was " + serviceState
+ ". Quitting election marking fencing necessary.");
delayJoiningUntilNanotime = System.nanoTime()
+ TimeUnit.MILLISECONDS.toNanos(1000);
elector.quitElection(true);
quitElectionOnBadState = true;
serviceStateMismatchCount = 0;
serviceState = HAServiceState.INITIALIZING;
}
}
}
/**
* @return the last health state passed to the FC
* by the HealthMonitor.
*/
protected synchronized State getLastHealthState() {
return lastHealthState;
}
protected synchronized void setLastHealthState(HealthMonitor.State newState) {
LOG.info("Local service " + localTarget +
" entered state: " + newState);
lastHealthState = newState;
}
@VisibleForTesting
ActiveStandbyElector getElectorForTests() {
return elector;
}
@VisibleForTesting
ZKFCRpcServer getRpcServerForTests() {
return rpcServer;
}
/**
* Callbacks from elector
*/
class ElectorCallbacks implements ActiveStandbyElectorCallback {
@Override
public void becomeActive() throws ServiceFailedException {
ZKFailoverController.this.becomeActive();
}
@Override
public void becomeStandby() {
ZKFailoverController.this.becomeStandby();
}
@Override
public void enterNeutralMode() {
}
@Override
public void notifyFatalError(String errorMessage) {
fatalError(errorMessage);
}
@Override
public void fenceOldActive(byte[] data) {
ZKFailoverController.this.fenceOldActive(data);
}
@Override
public String toString() {
synchronized (ZKFailoverController.this) {
return "Elector callbacks for " + localTarget;
}
}
}
/**
* Callbacks from HealthMonitor
*/
class HealthCallbacks implements HealthMonitor.Callback {
@Override
public void enteredState(HealthMonitor.State newState) {
setLastHealthState(newState);
recheckElectability();
}
}
/**
* Callbacks for HAServiceStatus
*/
class ServiceStateCallBacks implements HealthMonitor.ServiceStateCallback {
@Override
public void reportServiceStatus(HAServiceStatus status) {
verifyChangedServiceState(status.getState());
}
}
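  /**
   * Record of a single attempt to become active, used by the graceful
   * failover code to wait for and report the outcome of an election.
   */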
private static class ActiveAttemptRecord {
private final boolean succeeded;
private final String status;
private final long nanoTime;
public ActiveAttemptRecord(boolean succeeded, String status) {
this.succeeded = succeeded;
this.status = status;
this.nanoTime = System.nanoTime();
}
}
}
| 31,767 | 32.903949 | 126 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/BadFencingConfigurationException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Indicates that the operator has specified an invalid configuration
* for fencing methods.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class BadFencingConfigurationException extends IOException {
private static final long serialVersionUID = 1L;
public BadFencingConfigurationException(String msg) {
super(msg);
}
public BadFencingConfigurationException(String msg, Throwable cause) {
super(msg, cause);
}
}
| 1,443 | 33.380952 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ipc.RPC;
import com.google.common.base.Preconditions;
/**
 * The FailoverController is responsible for electing an active service
* on startup or when the current active is changing (eg due to failure),
* monitoring the health of a service, and performing a fail-over when a
* new active service is either manually selected by a user or elected.
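 * <p>
 * A minimal usage sketch (target resolution and exception handling
 * omitted; see {@link HAAdmin} for the real call sites):
 * <pre>
 * FailoverController fc = new FailoverController(conf,
 *     RequestSource.REQUEST_BY_USER);
 * fc.failover(fromTarget, toTarget, false, false);
 * </pre>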
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class FailoverController {
private static final Log LOG = LogFactory.getLog(FailoverController.class);
private final int gracefulFenceTimeout;
private final int rpcTimeoutToNewActive;
private final Configuration conf;
/*
* Need a copy of conf for graceful fence to set
* configurable retries for IPC client.
* Refer HDFS-3561
*/
private final Configuration gracefulFenceConf;
private final RequestSource requestSource;
public FailoverController(Configuration conf,
RequestSource source) {
this.conf = conf;
this.gracefulFenceConf = new Configuration(conf);
this.requestSource = source;
this.gracefulFenceTimeout = getGracefulFenceTimeout(conf);
this.rpcTimeoutToNewActive = getRpcTimeoutToNewActive(conf);
//Configure less retries for graceful fence
int gracefulFenceConnectRetries = conf.getInt(
CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_CONNECTION_RETRIES,
CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_CONNECTION_RETRIES_DEFAULT);
gracefulFenceConf.setInt(
CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
gracefulFenceConnectRetries);
gracefulFenceConf.setInt(
CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
gracefulFenceConnectRetries);
}
static int getGracefulFenceTimeout(Configuration conf) {
return conf.getInt(
CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY,
CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT);
}
static int getRpcTimeoutToNewActive(Configuration conf) {
return conf.getInt(
CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_KEY,
CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_DEFAULT);
}
/**
   * Perform pre-failover checks on the given service we plan to
   * fail over to, eg to prevent failing over to a service that is
   * inaccessible, already active, or not healthy.
*
* An option to ignore toSvc if it claims it is not ready to
* become active is provided in case performing a failover will
* allow it to become active, eg because it triggers a log roll
* so the standby can learn about new blocks and leave safemode.
*
* @param from currently active service
* @param target service to make active
   * @param forceActive ignore the target if it reports that it is not ready
* @throws FailoverFailedException if we should avoid failover
*/
private void preFailoverChecks(HAServiceTarget from,
HAServiceTarget target,
boolean forceActive)
throws FailoverFailedException {
HAServiceStatus toSvcStatus;
HAServiceProtocol toSvc;
if (from.getAddress().equals(target.getAddress())) {
throw new FailoverFailedException(
"Can't failover a service to itself");
}
try {
toSvc = target.getProxy(conf, rpcTimeoutToNewActive);
toSvcStatus = toSvc.getServiceStatus();
} catch (IOException e) {
String msg = "Unable to get service state for " + target;
LOG.error(msg + ": " + e.getLocalizedMessage());
throw new FailoverFailedException(msg, e);
}
if (!toSvcStatus.getState().equals(HAServiceState.STANDBY)) {
throw new FailoverFailedException(
"Can't failover to an active service");
}
if (!toSvcStatus.isReadyToBecomeActive()) {
String notReadyReason = toSvcStatus.getNotReadyReason();
if (!forceActive) {
throw new FailoverFailedException(
target + " is not ready to become active: " +
notReadyReason);
} else {
LOG.warn("Service is not ready to become active, but forcing: " +
notReadyReason);
}
}
try {
HAServiceProtocolHelper.monitorHealth(toSvc, createReqInfo());
} catch (HealthCheckFailedException hce) {
throw new FailoverFailedException(
"Can't failover to an unhealthy service", hce);
} catch (IOException e) {
throw new FailoverFailedException(
"Got an IO exception", e);
}
}
private StateChangeRequestInfo createReqInfo() {
return new StateChangeRequestInfo(requestSource);
}
/**
   * Try to gracefully transition the given service to the standby state.
   * This attempt is guaranteed to be "quick" -- ie it uses a short timeout
   * and few retries -- since its only purpose is to avoid the heavier
   * configured fencing methods when the old active is still reachable
   * and cooperative.
   * @return true if the service was gracefully transitioned to standby
*/
boolean tryGracefulFence(HAServiceTarget svc) {
HAServiceProtocol proxy = null;
try {
proxy = svc.getProxy(gracefulFenceConf, gracefulFenceTimeout);
proxy.transitionToStandby(createReqInfo());
return true;
} catch (ServiceFailedException sfe) {
LOG.warn("Unable to gracefully make " + svc + " standby (" +
sfe.getMessage() + ")");
} catch (IOException ioe) {
LOG.warn("Unable to gracefully make " + svc +
" standby (unable to connect)", ioe);
} finally {
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
return false;
}
/**
* Failover from service 1 to service 2. If the failover fails
* then try to failback.
*
* @param fromSvc currently active service
* @param toSvc service to make active
* @param forceFence to fence fromSvc even if not strictly necessary
* @param forceActive try to make toSvc active even if it is not ready
* @throws FailoverFailedException if the failover fails
*/
public void failover(HAServiceTarget fromSvc,
HAServiceTarget toSvc,
boolean forceFence,
boolean forceActive)
throws FailoverFailedException {
Preconditions.checkArgument(fromSvc.getFencer() != null,
"failover requires a fencer");
preFailoverChecks(fromSvc, toSvc, forceActive);
// Try to make fromSvc standby
boolean tryFence = true;
if (tryGracefulFence(fromSvc)) {
tryFence = forceFence;
}
// Fence fromSvc if it's required or forced by the user
if (tryFence) {
if (!fromSvc.getFencer().fence(fromSvc)) {
throw new FailoverFailedException("Unable to fence " +
fromSvc + ". Fencing failed.");
}
}
// Try to make toSvc active
boolean failed = false;
Throwable cause = null;
try {
HAServiceProtocolHelper.transitionToActive(
toSvc.getProxy(conf, rpcTimeoutToNewActive),
createReqInfo());
} catch (ServiceFailedException sfe) {
LOG.error("Unable to make " + toSvc + " active (" +
sfe.getMessage() + "). Failing back.");
failed = true;
cause = sfe;
} catch (IOException ioe) {
LOG.error("Unable to make " + toSvc +
" active (unable to connect). Failing back.", ioe);
failed = true;
cause = ioe;
}
// We failed to make toSvc active
if (failed) {
String msg = "Unable to failover to " + toSvc;
// Only try to failback if we didn't fence fromSvc
if (!tryFence) {
try {
// Unconditionally fence toSvc in case it is still trying to
// become active, eg we timed out waiting for its response.
// Unconditionally force fromSvc to become active since it
// was previously active when we initiated failover.
failover(toSvc, fromSvc, true, true);
} catch (FailoverFailedException ffe) {
msg += ". Failback to " + fromSvc +
" failed (" + ffe.getMessage() + ")";
LOG.fatal(msg);
}
}
throw new FailoverFailedException(msg, cause);
}
}
}
| 9,473 | 35.160305 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
/**
* This class parses the configured list of fencing methods, and
* is responsible for trying each one in turn while logging informative
* output.<p>
*
 * The fencing methods are configured as a newline-separated list.
* Each line in the list is of the form:<p>
* <code>com.example.foo.MyMethod(arg string)</code>
* or
* <code>com.example.foo.MyMethod</code>
* The class provided must implement the {@link FenceMethod} interface.
* The fencing methods that ship with Hadoop may also be referred to
* by shortened names:<p>
* <ul>
* <li><code>shell(/path/to/some/script.sh args...)</code></li>
* <li><code>sshfence(...)</code> (see {@link SshFenceByTcpPort})
* </ul>
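 * <p>
 * For example, a configured list might look like (the script path is
 * hypothetical; text after '#' is stripped as a comment):
 * <pre>
 * sshfence(user:22)
 * shell(/path/to/my/script.sh arg1 arg2)  # custom fallback script
 * </pre>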
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class NodeFencer {
private static final String CLASS_RE = "([a-zA-Z0-9\\.\\$]+)";
private static final Pattern CLASS_WITH_ARGUMENT =
Pattern.compile(CLASS_RE + "\\((.+?)\\)");
private static final Pattern CLASS_WITHOUT_ARGUMENT =
Pattern.compile(CLASS_RE);
private static final Pattern HASH_COMMENT_RE =
Pattern.compile("#.*$");
private static final Log LOG = LogFactory.getLog(NodeFencer.class);
/**
* Standard fencing methods included with Hadoop.
*/
private static final Map<String, Class<? extends FenceMethod>> STANDARD_METHODS =
ImmutableMap.<String, Class<? extends FenceMethod>>of(
"shell", ShellCommandFencer.class,
"sshfence", SshFenceByTcpPort.class);
private final List<FenceMethodWithArg> methods;
NodeFencer(Configuration conf, String spec)
throws BadFencingConfigurationException {
this.methods = parseMethods(conf, spec);
}
public static NodeFencer create(Configuration conf, String confKey)
throws BadFencingConfigurationException {
String confStr = conf.get(confKey);
if (confStr == null) {
return null;
}
return new NodeFencer(conf, confStr);
}
public boolean fence(HAServiceTarget fromSvc) {
LOG.info("====== Beginning Service Fencing Process... ======");
int i = 0;
for (FenceMethodWithArg method : methods) {
LOG.info("Trying method " + (++i) + "/" + methods.size() +": " + method);
try {
if (method.method.tryFence(fromSvc, method.arg)) {
LOG.info("====== Fencing successful by method " + method + " ======");
return true;
}
} catch (BadFencingConfigurationException e) {
LOG.error("Fencing method " + method + " misconfigured", e);
continue;
} catch (Throwable t) {
LOG.error("Fencing method " + method + " failed with an unexpected error.", t);
continue;
}
LOG.warn("Fencing method " + method + " was unsuccessful.");
}
LOG.error("Unable to fence service by any configured method.");
return false;
}
private static List<FenceMethodWithArg> parseMethods(Configuration conf,
String spec)
throws BadFencingConfigurationException {
String[] lines = spec.split("\\s*\n\\s*");
List<FenceMethodWithArg> methods = Lists.newArrayList();
for (String line : lines) {
line = HASH_COMMENT_RE.matcher(line).replaceAll("");
line = line.trim();
if (!line.isEmpty()) {
methods.add(parseMethod(conf, line));
}
}
return methods;
}
private static FenceMethodWithArg parseMethod(Configuration conf, String line)
throws BadFencingConfigurationException {
Matcher m;
if ((m = CLASS_WITH_ARGUMENT.matcher(line)).matches()) {
String className = m.group(1);
String arg = m.group(2);
return createFenceMethod(conf, className, arg);
} else if ((m = CLASS_WITHOUT_ARGUMENT.matcher(line)).matches()) {
String className = m.group(1);
return createFenceMethod(conf, className, null);
} else {
throw new BadFencingConfigurationException(
"Unable to parse line: '" + line + "'");
}
}
private static FenceMethodWithArg createFenceMethod(
Configuration conf, String clazzName, String arg)
throws BadFencingConfigurationException {
Class<?> clazz;
try {
// See if it's a short name for one of the built-in methods
clazz = STANDARD_METHODS.get(clazzName);
if (clazz == null) {
// Try to instantiate the user's custom method
clazz = Class.forName(clazzName);
}
} catch (Exception e) {
throw new BadFencingConfigurationException(
"Could not find configured fencing method " + clazzName,
e);
}
// Check that it implements the right interface
if (!FenceMethod.class.isAssignableFrom(clazz)) {
throw new BadFencingConfigurationException("Class " + clazzName +
" does not implement FenceMethod");
}
FenceMethod method = (FenceMethod)ReflectionUtils.newInstance(
clazz, conf);
method.checkArgs(arg);
return new FenceMethodWithArg(method, arg);
}
private static class FenceMethodWithArg {
private final FenceMethod method;
private final String arg;
private FenceMethodWithArg(FenceMethod method, String arg) {
this.method = method;
this.arg = arg;
}
@Override
public String toString() {
return method.getClass().getCanonicalName() + "(" + arg + ")";
}
}
}
| 6,660 | 33.512953 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthCheckFailedException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Exception thrown to indicate that health check of a service failed.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HealthCheckFailedException extends IOException {
private static final long serialVersionUID = 1L;
public HealthCheckFailedException(final String message) {
super(message);
}
public HealthCheckFailedException(String message, Throwable cause) {
super(message, cause);
}
}
| 1,426 | 33.804878 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FenceMethod.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
/**
* A fencing method is a method by which one node can forcibly prevent
* another node from making continued progress. This might be implemented
* by killing a process on the other node, by denying the other node's
* access to shared storage, or by accessing a PDU to cut the other node's
* power.
* <p>
* Since these methods are often vendor- or device-specific, operators
* may implement this interface in order to achieve fencing.
* <p>
* Fencing is configured by the operator as an ordered list of methods to
* attempt. Each method will be tried in turn, and the next in the list
* will only be attempted if the previous one fails. See {@link NodeFencer}
* for more information.
* <p>
* If an implementation also implements {@link Configurable} then its
* <code>setConf</code> method will be called upon instantiation.
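 * <p>
 * A minimal sketch of a custom implementation (the class and the PDU
 * device are hypothetical) might look like:
 * <pre>
 * public class PduFencer implements FenceMethod {
 *   public void checkArgs(String args)
 *       throws BadFencingConfigurationException {
 *     if (args == null) {
 *       throw new BadFencingConfigurationException("PDU address required");
 *     }
 *   }
 *   public boolean tryFence(HAServiceTarget target, String args) {
 *     // Contact the PDU named by 'args' and cut power to
 *     // target.getAddress(); return true only on positive confirmation.
 *     return true;
 *   }
 * }
 * </pre>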
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public interface FenceMethod {
/**
* Verify that the given fencing method's arguments are valid.
* @param args the arguments provided in the configuration. This may
* be null if the operator did not configure any arguments.
* @throws BadFencingConfigurationException if the arguments are invalid
*/
public void checkArgs(String args) throws BadFencingConfigurationException;
/**
* Attempt to fence the target node.
   * @param target the target service to fence
* @param args the configured arguments, which were checked at startup by
* {@link #checkArgs(String)}
* @return true if fencing was successful, false if unsuccessful or
* indeterminate
* @throws BadFencingConfigurationException if the configuration was
* determined to be invalid only at runtime
*/
public boolean tryFence(HAServiceTarget target, String args)
throws BadFencingConfigurationException;
}
| 2,889 | 42.787879 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@InterfaceAudience.Private
public class HAServiceStatus {
private HAServiceState state;
private boolean readyToBecomeActive;
private String notReadyReason;
public HAServiceStatus(HAServiceState state) {
this.state = state;
}
public HAServiceState getState() {
return state;
}
public HAServiceStatus setReadyToBecomeActive() {
this.readyToBecomeActive = true;
this.notReadyReason = null;
return this;
}
public HAServiceStatus setNotReadyToBecomeActive(String reason) {
this.readyToBecomeActive = false;
this.notReadyReason = reason;
return this;
}
public boolean isReadyToBecomeActive() {
return readyToBecomeActive;
}
public String getNotReadyReason() {
return notReadyReason;
}
}
| 1,721 | 29.210526 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Map;
import javax.net.SocketFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
import org.apache.hadoop.ha.protocolPB.ZKFCProtocolClientSideTranslatorPB;
import org.apache.hadoop.net.NetUtils;
import com.google.common.collect.Maps;
/**
* Represents a target of the client side HA administration commands.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class HAServiceTarget {
private static final String HOST_SUBST_KEY = "host";
private static final String PORT_SUBST_KEY = "port";
private static final String ADDRESS_SUBST_KEY = "address";
/**
* @return the IPC address of the target node.
*/
public abstract InetSocketAddress getAddress();
/**
* @return the IPC address of the ZKFC on the target node
*/
public abstract InetSocketAddress getZKFCAddress();
/**
* @return a Fencer implementation configured for this target node
*/
public abstract NodeFencer getFencer();
/**
* @throws BadFencingConfigurationException if the fencing configuration
* appears to be invalid. This is divorced from the above
* {@link #getFencer()} method so that the configuration can be checked
* during the pre-flight phase of failover.
*/
public abstract void checkFencingConfigured()
throws BadFencingConfigurationException;
/**
* @return a proxy to connect to the target HA Service.
*/
public HAServiceProtocol getProxy(Configuration conf, int timeoutMs)
throws IOException {
Configuration confCopy = new Configuration(conf);
// Lower the timeout so we quickly fail to connect
confCopy.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
return new HAServiceProtocolClientSideTranslatorPB(
getAddress(),
confCopy, factory, timeoutMs);
}
/**
* @return a proxy to the ZKFC which is associated with this HA service.
*/
public ZKFCProtocol getZKFCProxy(Configuration conf, int timeoutMs)
throws IOException {
Configuration confCopy = new Configuration(conf);
// Lower the timeout so we quickly fail to connect
confCopy.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
return new ZKFCProtocolClientSideTranslatorPB(
getZKFCAddress(),
confCopy, factory, timeoutMs);
}
public final Map<String, String> getFencingParameters() {
Map<String, String> ret = Maps.newHashMap();
addFencingParameters(ret);
return ret;
}
/**
* Hook to allow subclasses to add any parameters they would like to
* expose to fencing implementations/scripts. Fencing methods are free
* to use this map as they see fit -- notably, the shell script
* implementation takes each entry, prepends 'target_', substitutes
* '_' for '.', and adds it to the environment of the script.
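   * For example, the "host" parameter added below would surface to a
   * shell fencing script as the environment variable <code>$target_host</code>.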
*
* Subclass implementations should be sure to delegate to the superclass
* implementation as well as adding their own keys.
*
* @param ret map which can be mutated to pass parameters to the fencer
*/
protected void addFencingParameters(Map<String, String> ret) {
ret.put(ADDRESS_SUBST_KEY, String.valueOf(getAddress()));
ret.put(HOST_SUBST_KEY, getAddress().getHostName());
ret.put(PORT_SUBST_KEY, String.valueOf(getAddress().getPort()));
}
/**
* @return true if auto failover should be considered enabled
*/
public boolean isAutoFailoverEnabled() {
return false;
}
}
| 4,765 | 35.661538 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
/**
* Class responsible for pumping the streams of the subprocess
* out to log4j. stderr is pumped to WARN level and stdout is
 * pumped to INFO level.
*/
class StreamPumper {
enum StreamType {
STDOUT, STDERR;
}
private final Log log;
final Thread thread;
final String logPrefix;
final StreamPumper.StreamType type;
private final InputStream stream;
private boolean started = false;
StreamPumper(final Log log, final String logPrefix,
final InputStream stream, final StreamType type) {
this.log = log;
this.logPrefix = logPrefix;
this.stream = stream;
this.type = type;
thread = new Thread(new Runnable() {
@Override
public void run() {
try {
pump();
} catch (Throwable t) {
ShellCommandFencer.LOG.warn(logPrefix +
": Unable to pump output from " + type,
t);
}
}
}, logPrefix + ": StreamPumper for " + type);
thread.setDaemon(true);
}
void join() throws InterruptedException {
assert started;
thread.join();
}
void start() {
assert !started;
thread.start();
started = true;
}
protected void pump() throws IOException {
InputStreamReader inputStreamReader = new InputStreamReader(
stream, Charsets.UTF_8);
BufferedReader br = new BufferedReader(inputStreamReader);
String line = null;
while ((line = br.readLine()) != null) {
if (type == StreamType.STDOUT) {
log.info(logPrefix + ": " + line);
} else {
log.warn(logPrefix + ": " + line);
}
}
}
}
| 2,655 | 27.55914 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.KerberosInfo;
import java.io.IOException;
/**
* Protocol interface that provides High Availability related primitives to
 * monitor and fail over the service.
*
* This interface could be used by HA frameworks to manage the service.
*/
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface HAServiceProtocol {
/**
* Initial version of the protocol
*/
public static final long versionID = 1L;
/**
* An HA service may be in active or standby state. During startup, it is in
* an unknown INITIALIZING state. During shutdown, it is in the STOPPING state
* and can no longer return to active/standby states.
*/
public enum HAServiceState {
INITIALIZING("initializing"),
ACTIVE("active"),
STANDBY("standby"),
STOPPING("stopping");
private String name;
HAServiceState(String name) {
this.name = name;
}
@Override
public String toString() {
return name;
}
}
public static enum RequestSource {
REQUEST_BY_USER,
REQUEST_BY_USER_FORCED,
REQUEST_BY_ZKFC;
}
/**
* Information describing the source for a request to change state.
* This is used to differentiate requests from automatic vs CLI
* failover controllers, and in the future may include epoch
* information.
*/
public static class StateChangeRequestInfo {
private final RequestSource source;
public StateChangeRequestInfo(RequestSource source) {
super();
this.source = source;
}
public RequestSource getSource() {
return source;
}
}
/**
* Monitor the health of service. This periodically called by the HA
* frameworks to monitor the health of the service.
*
* Service is expected to perform checks to ensure it is functional.
* If the service is not healthy due to failure or partial failure,
* it is expected to throw {@link HealthCheckFailedException}.
* The definition of service not healthy is left to the service.
*
* Note that when health check of an Active service fails,
* failover to standby may be done.
*
* @throws HealthCheckFailedException
* if the health check of a service fails.
* @throws AccessControlException
* if access is denied.
* @throws IOException
* if other errors happen
*/
@Idempotent
public void monitorHealth() throws HealthCheckFailedException,
AccessControlException,
IOException;
/**
* Request service to transition to active state. No operation, if the
* service is already in active state.
*
* @throws ServiceFailedException
* if transition from standby to active fails.
* @throws AccessControlException
* if access is denied.
* @throws IOException
* if other errors happen
*/
@Idempotent
public void transitionToActive(StateChangeRequestInfo reqInfo)
throws ServiceFailedException,
AccessControlException,
IOException;
/**
* Request service to transition to standby state. No operation, if the
* service is already in standby state.
*
* @throws ServiceFailedException
* if transition from active to standby fails.
* @throws AccessControlException
* if access is denied.
* @throws IOException
* if other errors happen
*/
@Idempotent
public void transitionToStandby(StateChangeRequestInfo reqInfo)
throws ServiceFailedException,
AccessControlException,
IOException;
/**
* Return the current status of the service. The status indicates
   * the current <em>state</em> (e.g. ACTIVE/STANDBY) as well as
   * some additional information. See {@link HAServiceStatus}.
*
* @throws AccessControlException
* if access is denied.
* @throws IOException
* if other errors happen
*/
@Idempotent
public HAServiceStatus getServiceStatus() throws AccessControlException,
IOException;
}
| 5,526 | 32.49697 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ipc.RemoteException;
/**
* Helper for making {@link HAServiceProtocol} RPC calls. This helper
* unwraps the {@link RemoteException} to specific exceptions.
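 * For example, a {@link RemoteException} wrapping a
 * {@link ServiceFailedException} thrown on the server side is re-thrown
 * by these helpers as the original {@link ServiceFailedException}.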
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HAServiceProtocolHelper {
public static void monitorHealth(HAServiceProtocol svc,
StateChangeRequestInfo reqInfo)
throws IOException {
try {
svc.monitorHealth();
} catch (RemoteException e) {
throw e.unwrapRemoteException(HealthCheckFailedException.class);
}
}
public static void transitionToActive(HAServiceProtocol svc,
StateChangeRequestInfo reqInfo)
throws IOException {
try {
svc.transitionToActive(reqInfo);
} catch (RemoteException e) {
throw e.unwrapRemoteException(ServiceFailedException.class);
}
}
public static void transitionToStandby(HAServiceProtocol svc,
StateChangeRequestInfo reqInfo)
throws IOException {
try {
svc.transitionToStandby(reqInfo);
} catch (RemoteException e) {
throw e.unwrapRemoteException(ServiceFailedException.class);
}
}
}
| 2,195 | 33.3125 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
/**
* A command-line tool for making calls in the HAServiceProtocol.
 * For example, this can be used to force a service to standby or active
* mode, or to trigger a health-check.
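 * <p>
 * Example invocations through a concrete subclass (the command name and
 * service IDs here are hypothetical):
 * <pre>
 * haadmin -getServiceState serviceA
 * haadmin -failover serviceA serviceB
 * haadmin -checkHealth serviceA
 * </pre>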
*/
@InterfaceAudience.Private
public abstract class HAAdmin extends Configured implements Tool {
private static final String FORCEFENCE = "forcefence";
private static final String FORCEACTIVE = "forceactive";
/**
* Undocumented flag which allows an administrator to use manual failover
* state transitions even when auto-failover is enabled. This is an unsafe
* operation, which is why it is not documented in the usage below.
*/
private static final String FORCEMANUAL = "forcemanual";
private static final Log LOG = LogFactory.getLog(HAAdmin.class);
private int rpcTimeoutForChecks = -1;
protected final static Map<String, UsageInfo> USAGE =
ImmutableMap.<String, UsageInfo>builder()
.put("-transitionToActive",
new UsageInfo("[--"+FORCEACTIVE+"] <serviceId>", "Transitions the service into Active state"))
.put("-transitionToStandby",
new UsageInfo("<serviceId>", "Transitions the service into Standby state"))
.put("-failover",
new UsageInfo("[--"+FORCEFENCE+"] [--"+FORCEACTIVE+"] <serviceId> <serviceId>",
"Failover from the first service to the second.\n" +
"Unconditionally fence services if the --"+FORCEFENCE+" option is used.\n" +
"Try to failover to the target service even if it is not ready if the " +
"--" + FORCEACTIVE + " option is used."))
.put("-getServiceState",
new UsageInfo("<serviceId>", "Returns the state of the service"))
.put("-checkHealth",
new UsageInfo("<serviceId>",
"Requests that the service perform a health check.\n" +
"The HAAdmin tool will exit with a non-zero exit code\n" +
"if the check fails."))
.put("-help",
new UsageInfo("<command>", "Displays help on the specified command"))
.build();
/** Output stream for errors, for use in tests */
protected PrintStream errOut = System.err;
protected PrintStream out = System.out;
private RequestSource requestSource = RequestSource.REQUEST_BY_USER;
protected HAAdmin() {
super();
}
protected HAAdmin(Configuration conf) {
super(conf);
}
protected abstract HAServiceTarget resolveTarget(String string);
protected Collection<String> getTargetIds(String targetNodeToActivate) {
return new ArrayList<String>(
Arrays.asList(new String[]{targetNodeToActivate}));
}
protected String getUsageString() {
return "Usage: HAAdmin";
}
protected void printUsage(PrintStream errOut) {
errOut.println(getUsageString());
for (Map.Entry<String, UsageInfo> e : USAGE.entrySet()) {
String cmd = e.getKey();
UsageInfo usage = e.getValue();
errOut.println(" [" + cmd + " " + usage.args + "]");
}
errOut.println();
ToolRunner.printGenericCommandUsage(errOut);
}
private void printUsage(PrintStream errOut, String cmd) {
UsageInfo usage = USAGE.get(cmd);
if (usage == null) {
throw new RuntimeException("No usage for cmd " + cmd);
}
errOut.println(getUsageString() + " [" + cmd + " " + usage.args + "]");
}
private int transitionToActive(final CommandLine cmd)
throws IOException, ServiceFailedException {
String[] argv = cmd.getArgs();
if (argv.length != 1) {
errOut.println("transitionToActive: incorrect number of arguments");
printUsage(errOut, "-transitionToActive");
return -1;
}
/* returns true if other target node is active or some exception occurred
and forceActive was not set */
if(!cmd.hasOption(FORCEACTIVE)) {
if(isOtherTargetNodeActive(argv[0], cmd.hasOption(FORCEACTIVE))) {
return -1;
}
}
HAServiceTarget target = resolveTarget(argv[0]);
if (!checkManualStateManagementOK(target)) {
return -1;
}
HAServiceProtocol proto = target.getProxy(
getConf(), 0);
HAServiceProtocolHelper.transitionToActive(proto, createReqInfo());
return 0;
}
/**
   * Checks whether any other target node is already active.
   * @param targetNodeToActivate the node we intend to activate
   * @return true if another target node is active, or if a check failed
   * and forceActive was not set; false otherwise
* @throws IOException
*/
private boolean isOtherTargetNodeActive(String targetNodeToActivate, boolean forceActive)
throws IOException {
Collection<String> targetIds = getTargetIds(targetNodeToActivate);
targetIds.remove(targetNodeToActivate);
for(String targetId : targetIds) {
HAServiceTarget target = resolveTarget(targetId);
if (!checkManualStateManagementOK(target)) {
return true;
}
try {
HAServiceProtocol proto = target.getProxy(getConf(), 5000);
if(proto.getServiceStatus().getState() == HAServiceState.ACTIVE) {
errOut.println("transitionToActive: Node " + targetId +" is already active");
printUsage(errOut, "-transitionToActive");
return true;
}
} catch (Exception e) {
//If forceActive switch is false then return true
if(!forceActive) {
errOut.println("Unexpected error occurred " + e.getMessage());
printUsage(errOut, "-transitionToActive");
return true;
}
}
}
return false;
}
private int transitionToStandby(final CommandLine cmd)
throws IOException, ServiceFailedException {
String[] argv = cmd.getArgs();
if (argv.length != 1) {
errOut.println("transitionToStandby: incorrect number of arguments");
printUsage(errOut, "-transitionToStandby");
return -1;
}
HAServiceTarget target = resolveTarget(argv[0]);
if (!checkManualStateManagementOK(target)) {
return -1;
}
HAServiceProtocol proto = target.getProxy(
getConf(), 0);
HAServiceProtocolHelper.transitionToStandby(proto, createReqInfo());
return 0;
}
/**
* Ensure that we are allowed to manually manage the HA state of the target
* service. If automatic failover is configured, then the automatic
* failover controllers should be doing state management, and it is generally
* an error to use the HAAdmin command line to do so.
*
* @param target the target to check
* @return true if manual state management is allowed
*/
private boolean checkManualStateManagementOK(HAServiceTarget target) {
if (target.isAutoFailoverEnabled()) {
if (requestSource != RequestSource.REQUEST_BY_USER_FORCED) {
errOut.println(
"Automatic failover is enabled for " + target + "\n" +
"Refusing to manually manage HA state, since it may cause\n" +
"a split-brain scenario or other incorrect state.\n" +
"If you are very sure you know what you are doing, please \n" +
"specify the --" + FORCEMANUAL + " flag.");
return false;
} else {
LOG.warn("Proceeding with manual HA state management even though\n" +
"automatic failover is enabled for " + target);
return true;
}
}
return true;
}
private StateChangeRequestInfo createReqInfo() {
return new StateChangeRequestInfo(requestSource);
}
private int failover(CommandLine cmd)
throws IOException, ServiceFailedException {
boolean forceFence = cmd.hasOption(FORCEFENCE);
boolean forceActive = cmd.hasOption(FORCEACTIVE);
int numOpts = cmd.getOptions() == null ? 0 : cmd.getOptions().length;
final String[] args = cmd.getArgs();
if (numOpts > 3 || args.length != 2) {
errOut.println("failover: incorrect arguments");
printUsage(errOut, "-failover");
return -1;
}
HAServiceTarget fromNode = resolveTarget(args[0]);
HAServiceTarget toNode = resolveTarget(args[1]);
// Check that auto-failover is consistently configured for both nodes.
Preconditions.checkState(
fromNode.isAutoFailoverEnabled() ==
toNode.isAutoFailoverEnabled(),
"Inconsistent auto-failover configs between %s and %s!",
fromNode, toNode);
if (fromNode.isAutoFailoverEnabled()) {
if (forceFence || forceActive) {
// -forceActive doesn't make sense with auto-HA, since, if the node
// is not healthy, then its ZKFC will immediately quit the election
// again the next time a health check runs.
//
// -forceFence doesn't seem to have any real use cases with auto-HA
// so it isn't implemented.
errOut.println(FORCEFENCE + " and " + FORCEACTIVE + " flags not " +
"supported with auto-failover enabled.");
return -1;
}
try {
return gracefulFailoverThroughZKFCs(toNode);
} catch (UnsupportedOperationException e){
errOut.println("Failover command is not supported with " +
"auto-failover enabled: " + e.getLocalizedMessage());
return -1;
}
}
FailoverController fc = new FailoverController(getConf(),
requestSource);
try {
fc.failover(fromNode, toNode, forceFence, forceActive);
out.println("Failover from "+args[0]+" to "+args[1]+" successful");
} catch (FailoverFailedException ffe) {
errOut.println("Failover failed: " + ffe.getLocalizedMessage());
return -1;
}
return 0;
}
/**
* Initiate a graceful failover by talking to the target node's ZKFC.
* This sends an RPC to the ZKFC, which coordinates the failover.
*
* @param toNode the node to fail to
* @return status code (0 for success)
* @throws IOException if failover does not succeed
*/
private int gracefulFailoverThroughZKFCs(HAServiceTarget toNode)
throws IOException {
int timeout = FailoverController.getRpcTimeoutToNewActive(getConf());
ZKFCProtocol proxy = toNode.getZKFCProxy(getConf(), timeout);
try {
proxy.gracefulFailover();
out.println("Failover to " + toNode + " successful");
} catch (ServiceFailedException sfe) {
errOut.println("Failover failed: " + sfe.getLocalizedMessage());
return -1;
}
return 0;
}
private int checkHealth(final CommandLine cmd)
throws IOException, ServiceFailedException {
String[] argv = cmd.getArgs();
if (argv.length != 1) {
errOut.println("checkHealth: incorrect number of arguments");
printUsage(errOut, "-checkHealth");
return -1;
}
HAServiceProtocol proto = resolveTarget(argv[0]).getProxy(
getConf(), rpcTimeoutForChecks);
try {
HAServiceProtocolHelper.monitorHealth(proto, createReqInfo());
} catch (HealthCheckFailedException e) {
errOut.println("Health check failed: " + e.getLocalizedMessage());
return -1;
}
return 0;
}
private int getServiceState(final CommandLine cmd)
throws IOException, ServiceFailedException {
String[] argv = cmd.getArgs();
if (argv.length != 1) {
errOut.println("getServiceState: incorrect number of arguments");
printUsage(errOut, "-getServiceState");
return -1;
}
HAServiceProtocol proto = resolveTarget(argv[0]).getProxy(
getConf(), rpcTimeoutForChecks);
out.println(proto.getServiceStatus().getState());
return 0;
}
/**
   * Return the serviceId as-is; we assume it was
   * given as a service address of the form {@code host:ipcport}.
*/
protected String getServiceAddr(String serviceId) {
return serviceId;
}
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
if (conf != null) {
rpcTimeoutForChecks = conf.getInt(
CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
}
}
@Override
public int run(String[] argv) throws Exception {
try {
return runCmd(argv);
} catch (IllegalArgumentException iae) {
errOut.println("Illegal argument: " + iae.getLocalizedMessage());
return -1;
} catch (IOException ioe) {
errOut.println("Operation failed: " + ioe.getLocalizedMessage());
if (LOG.isDebugEnabled()) {
LOG.debug("Operation failed", ioe);
}
return -1;
}
}
protected int runCmd(String[] argv) throws Exception {
if (argv.length < 1) {
printUsage(errOut);
return -1;
}
String cmd = argv[0];
if (!cmd.startsWith("-")) {
errOut.println("Bad command '" + cmd + "': expected command starting with '-'");
printUsage(errOut);
return -1;
}
if (!USAGE.containsKey(cmd)) {
errOut.println(cmd.substring(1) + ": Unknown command");
printUsage(errOut);
return -1;
}
Options opts = new Options();
// Add command-specific options
if ("-failover".equals(cmd)) {
addFailoverCliOpts(opts);
}
if("-transitionToActive".equals(cmd)) {
addTransitionToActiveCliOpts(opts);
}
// Mutative commands take FORCEMANUAL option
if ("-transitionToActive".equals(cmd) ||
"-transitionToStandby".equals(cmd) ||
"-failover".equals(cmd)) {
opts.addOption(FORCEMANUAL, false,
"force manual control even if auto-failover is enabled");
}
CommandLine cmdLine = parseOpts(cmd, opts, argv);
if (cmdLine == null) {
// error already printed
return -1;
}
if (cmdLine.hasOption(FORCEMANUAL)) {
if (!confirmForceManual()) {
LOG.fatal("Aborted");
return -1;
}
// Instruct the NNs to honor this request even if they're
// configured for manual failover.
requestSource = RequestSource.REQUEST_BY_USER_FORCED;
}
if ("-transitionToActive".equals(cmd)) {
return transitionToActive(cmdLine);
} else if ("-transitionToStandby".equals(cmd)) {
return transitionToStandby(cmdLine);
} else if ("-failover".equals(cmd)) {
return failover(cmdLine);
} else if ("-getServiceState".equals(cmd)) {
return getServiceState(cmdLine);
} else if ("-checkHealth".equals(cmd)) {
return checkHealth(cmdLine);
} else if ("-help".equals(cmd)) {
return help(argv);
} else {
// we already checked command validity above, so getting here
// would be a coding error
throw new AssertionError("Should not get here, command: " + cmd);
}
}
private boolean confirmForceManual() throws IOException {
return ToolRunner.confirmPrompt(
"You have specified the --" + FORCEMANUAL + " flag. This flag is " +
"dangerous, as it can induce a split-brain scenario that WILL " +
"CORRUPT your HDFS namespace, possibly irrecoverably.\n" +
"\n" +
"It is recommended not to use this flag, but instead to shut down the " +
"cluster and disable automatic failover if you prefer to manually " +
"manage your HA state.\n" +
"\n" +
"You may abort safely by answering 'n' or hitting ^C now.\n" +
"\n" +
"Are you sure you want to continue?");
}
/**
* Add CLI options which are specific to the failover command and no
* others.
*/
private void addFailoverCliOpts(Options failoverOpts) {
failoverOpts.addOption(FORCEFENCE, false, "force fencing");
failoverOpts.addOption(FORCEACTIVE, false, "force failover");
// Don't add FORCEMANUAL, since that's added separately for all commands
// that change state.
}
/**
* Add CLI options which are specific to the transitionToActive command and
* no others.
*/
private void addTransitionToActiveCliOpts(Options transitionToActiveCliOpts) {
transitionToActiveCliOpts.addOption(FORCEACTIVE, false, "force active");
}
private CommandLine parseOpts(String cmdName, Options opts, String[] argv) {
try {
// Strip off the first arg, since that's just the command name
argv = Arrays.copyOfRange(argv, 1, argv.length);
return new GnuParser().parse(opts, argv);
} catch (ParseException pe) {
errOut.println(cmdName.substring(1) +
": incorrect arguments");
printUsage(errOut, cmdName);
return null;
}
}
private int help(String[] argv) {
if (argv.length == 1) { // only -help
printUsage(out);
return 0;
} else if (argv.length != 2) {
printUsage(errOut, "-help");
return -1;
}
String cmd = argv[1];
if (!cmd.startsWith("-")) {
cmd = "-" + cmd;
}
UsageInfo usageInfo = USAGE.get(cmd);
if (usageInfo == null) {
errOut.println(cmd + ": Unknown command");
printUsage(errOut);
return -1;
}
out.println(cmd + " [" + usageInfo.args + "]: " + usageInfo.help);
return 0;
}
protected static class UsageInfo {
public final String args;
public final String help;
public UsageInfo(String args, String help) {
this.args = args;
this.help = help;
}
}
}
| 18,899 | 33.42623 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverFailedException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Exception thrown to indicate service failover has failed.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class FailoverFailedException extends Exception {
private static final long serialVersionUID = 1L;
public FailoverFailedException(final String message) {
super(message);
}
public FailoverFailedException(String message, Throwable cause) {
super(message, cause);
}
}
| 1,374 | 34.25641 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configured;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.util.Shell;
/**
* Fencing method that runs a shell command. It should be specified
* in the fencing configuration like:<br>
* <code>
* shell(/path/to/my/script.sh arg1 arg2 ...)
* </code><br>
* The string between '(' and ')' is passed directly to a bash shell
* (cmd.exe on Windows) and may not include any closing parentheses.<p>
*
* The shell command will be run with an environment set up to contain
* all of the current Hadoop configuration variables, with the '_' character
* replacing any '.' characters in the configuration keys.<p>
*
* If the shell command returns an exit code of 0, the fencing is
* determined to be successful. If it returns any other exit code, the
* fencing was not successful and the next fencing method in the list
* will be attempted.<p>
*
* <em>Note:</em> this fencing method does not implement any timeout.
* If timeouts are necessary, they should be implemented in the shell
* script itself (eg by forking a subshell to kill its parent in
* some number of seconds).
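 * <p>
 * For illustration (the script path below is hypothetical): with a fencing
 * method of <code>shell(/usr/local/bin/fence.sh)</code>, a configuration key
 * such as <code>fs.defaultFS</code> appears to the script as the environment
 * variable <code>fs_defaultFS</code>, and target parameters appear with a
 * <code>target_</code> prefix.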
*/
public class ShellCommandFencer
extends Configured implements FenceMethod {
/** Length at which to abbreviate command in long messages */
private static final int ABBREV_LENGTH = 20;
/** Prefix for target parameters added to the environment */
private static final String TARGET_PREFIX = "target_";
@VisibleForTesting
static Log LOG = LogFactory.getLog(
ShellCommandFencer.class);
@Override
public void checkArgs(String args) throws BadFencingConfigurationException {
if (args == null || args.isEmpty()) {
throw new BadFencingConfigurationException(
"No argument passed to 'shell' fencing method");
}
// Nothing else we can really check without actually running the command
}
@Override
public boolean tryFence(HAServiceTarget target, String cmd) {
ProcessBuilder builder;
if (!Shell.WINDOWS) {
builder = new ProcessBuilder("bash", "-e", "-c", cmd);
} else {
builder = new ProcessBuilder("cmd.exe", "/c", cmd);
}
setConfAsEnvVars(builder.environment());
addTargetInfoAsEnvVars(target, builder.environment());
Process p;
try {
p = builder.start();
p.getOutputStream().close();
} catch (IOException e) {
LOG.warn("Unable to execute " + cmd, e);
return false;
}
String pid = tryGetPid(p);
LOG.info("Launched fencing command '" + cmd + "' with "
+ ((pid != null) ? ("pid " + pid) : "unknown pid"));
String logPrefix = abbreviate(cmd, ABBREV_LENGTH);
if (pid != null) {
logPrefix = "[PID " + pid + "] " + logPrefix;
}
// Pump logs to stderr
StreamPumper errPumper = new StreamPumper(
LOG, logPrefix, p.getErrorStream(),
StreamPumper.StreamType.STDERR);
errPumper.start();
StreamPumper outPumper = new StreamPumper(
LOG, logPrefix, p.getInputStream(),
StreamPumper.StreamType.STDOUT);
outPumper.start();
int rc;
try {
rc = p.waitFor();
errPumper.join();
outPumper.join();
} catch (InterruptedException ie) {
LOG.warn("Interrupted while waiting for fencing command: " + cmd);
return false;
}
return rc == 0;
}
/**
* Abbreviate a string by putting '...' in the middle of it,
* in an attempt to keep logs from getting too messy.
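   * For example, {@code abbreviate("0123456789", 7)} returns
   * {@code "01...89"}.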
* @param cmd the string to abbreviate
* @param len maximum length to abbreviate to
* @return abbreviated string
*/
static String abbreviate(String cmd, int len) {
if (cmd.length() > len && len >= 5) {
int firstHalf = (len - 3) / 2;
int rem = len - firstHalf - 3;
return cmd.substring(0, firstHalf) +
"..." + cmd.substring(cmd.length() - rem);
} else {
return cmd;
}
}
/**
* Attempt to use evil reflection tricks to determine the
   * pid of a launched process. This is helpful to operators
   * when debugging a fencing process that might have gone
   * wrong. If running on a system or JVM where this doesn't
* work, it will simply return null.
*/
private static String tryGetPid(Process p) {
try {
Class<? extends Process> clazz = p.getClass();
if (clazz.getName().equals("java.lang.UNIXProcess")) {
Field f = clazz.getDeclaredField("pid");
f.setAccessible(true);
return String.valueOf(f.getInt(p));
} else {
LOG.trace("Unable to determine pid for " + p
+ " since it is not a UNIXProcess");
return null;
}
} catch (Throwable t) {
LOG.trace("Unable to determine pid for " + p, t);
return null;
}
}
/**
* Set the environment of the subprocess to be the Configuration,
* with '.'s replaced by '_'s.
*/
private void setConfAsEnvVars(Map<String, String> env) {
for (Map.Entry<String, String> pair : getConf()) {
env.put(pair.getKey().replace('.', '_'), pair.getValue());
}
}
/**
   * Add information about the target to the environment of the
   * subprocess.
   *
   * @param target the service target being fenced
   * @param environment the environment of the fencing subprocess to populate
*/
private void addTargetInfoAsEnvVars(HAServiceTarget target,
Map<String, String> environment) {
for (Map.Entry<String, String> e :
target.getFencingParameters().entrySet()) {
String key = TARGET_PREFIX + e.getKey();
key = key.replace('.', '_');
environment.put(key, e.getValue());
}
}
}
| 6,558 | 31.631841 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ServiceFailedException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Exception thrown to indicate that an operation performed
* to modify the state of a service or application failed.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ServiceFailedException extends IOException {
private static final long serialVersionUID = 1L;
public ServiceFailedException(final String message) {
super(message);
}
public ServiceFailedException(String message, Throwable cause) {
super(message, cause);
}
}
| 1,463 | 33.046512 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher.Event;
import org.apache.zookeeper.ZKUtil;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.AsyncCallback.*;
import org.apache.zookeeper.data.Stat;
import org.apache.zookeeper.KeeperException.Code;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
*
* This class implements a simple library to perform leader election on top of
* Apache Zookeeper. Using Zookeeper as a coordination service, leader election
* can be performed by atomically creating an ephemeral lock file (znode) on
* Zookeeper. The service instance that successfully creates the znode becomes
* active and the rest become standbys. <br/>
 * This election mechanism is only efficient for a small number of election
 * candidates (on the order of tens) because contention on a single znode by a
 * large number of candidates can result in Zookeeper overload. <br/>
 * The elector does not guarantee fencing (protection of shared resources) among
 * service instances. After it has notified an instance about becoming a leader,
 * that instance must ensure that it meets the service consistency
 * requirements. If it cannot do so, then it is recommended to quit the
 * election. The application implements the {@link ActiveStandbyElectorCallback}
 * to interact with the elector.
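 * <p>
 * A minimal usage sketch (quorum, timeout, znode path and data values are
 * illustrative only):
 * <pre>
 * ActiveStandbyElectorCallback cb = ...; // app-provided callbacks
 * ActiveStandbyElector elector = new ActiveStandbyElector(
 *     "zk1:2181,zk2:2181,zk3:2181",  // ZK quorum
 *     5000,                          // session timeout, ms
 *     "/my-service-ha",              // parent znode
 *     ZooDefs.Ids.OPEN_ACL_UNSAFE,   // ACLs
 *     Collections.emptyList(),       // no ZK auth info
 *     cb, 3);                        // callback, max retries
 * elector.ensureParentZNode();
 * elector.joinElection(myAppData);   // non-null app data
 * </pre>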
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ActiveStandbyElector implements StatCallback, StringCallback {
/**
* Callback interface to interact with the ActiveStandbyElector object. <br/>
* The application will be notified with a callback only on state changes
* (i.e. there will never be successive calls to becomeActive without an
* intermediate call to enterNeutralMode). <br/>
* The callbacks will be running on Zookeeper client library threads. The
* application should return from these callbacks quickly so as not to impede
* Zookeeper client library performance and notifications. The app will
* typically remember the state change and return from the callback. It will
* then proceed with implementing actions around that state change. It is
* possible to be called back again while these actions are in flight and the
* app should handle this scenario.
*/
public interface ActiveStandbyElectorCallback {
/**
* This method is called when the app becomes the active leader.
* If the service fails to become active, it should throw
* ServiceFailedException. This will cause the elector to
* sleep for a short period, then re-join the election.
*
* Callback implementations are expected to manage their own
* timeouts (e.g. when making an RPC to a remote node).
*/
void becomeActive() throws ServiceFailedException;
/**
* This method is called when the app becomes a standby
*/
void becomeStandby();
/**
* If the elector gets disconnected from Zookeeper and does not know about
* the lock state, then it will notify the service via the enterNeutralMode
* interface. The service may choose to ignore this or stop doing state
* changing operations. Upon reconnection, the elector verifies the leader
* status and calls back on the becomeActive and becomeStandby app
* interfaces. <br/>
* Zookeeper disconnects can happen due to network issues or loss of
* Zookeeper quorum. Thus enterNeutralMode can be used to guard against
* split-brain issues. In such situations it might be prudent to call
* becomeStandby too. However, such state change operations might be
* expensive and enterNeutralMode can help guard against doing that for
* transient issues.
*/
void enterNeutralMode();
/**
* If there is any fatal error (e.g. wrong ACL's, unexpected Zookeeper
* errors or Zookeeper persistent unavailability) then notifyFatalError is
* called to notify the app about it.
*/
void notifyFatalError(String errorMessage);
/**
* If an old active has failed, rather than exited gracefully, then
* the new active may need to take some fencing actions against it
* before proceeding with failover.
*
* @param oldActiveData the application data provided by the prior active
*/
void fenceOldActive(byte[] oldActiveData);
}
/**
* Name of the lock znode used by the library. Protected for access in test
* classes
*/
@VisibleForTesting
protected static final String LOCK_FILENAME = "ActiveStandbyElectorLock";
@VisibleForTesting
protected static final String BREADCRUMB_FILENAME = "ActiveBreadCrumb";
public static final Log LOG = LogFactory.getLog(ActiveStandbyElector.class);
private static final int SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE = 1000;
private static enum ConnectionState {
DISCONNECTED, CONNECTED, TERMINATED
};
static enum State {
INIT, ACTIVE, STANDBY, NEUTRAL
};
private State state = State.INIT;
private int createRetryCount = 0;
private int statRetryCount = 0;
private ZooKeeper zkClient;
private WatcherWithClientRef watcher;
private ConnectionState zkConnectionState = ConnectionState.TERMINATED;
private final ActiveStandbyElectorCallback appClient;
private final String zkHostPort;
private final int zkSessionTimeout;
private final List<ACL> zkAcl;
private final List<ZKAuthInfo> zkAuthInfo;
private byte[] appData;
private final String zkLockFilePath;
private final String zkBreadCrumbPath;
private final String znodeWorkingDir;
private final int maxRetryNum;
private Lock sessionReestablishLockForTests = new ReentrantLock();
private boolean wantToBeInElection;
private boolean monitorLockNodePending = false;
private ZooKeeper monitorLockNodeClient;
/**
* Create a new ActiveStandbyElector object <br/>
* The elector is created by providing to it the Zookeeper configuration, the
* parent znode under which to create the znode and a reference to the
* callback interface. <br/>
* The parent znode name must be the same for all service instances and
* different across services. <br/>
* After the leader has been lost, a new leader will be elected after the
* session timeout expires. Hence, the app must set this parameter based on
* its needs for failure response time. The session timeout must be greater
* than the Zookeeper disconnect timeout and is recommended to be 3X that
* value to enable Zookeeper to retry transient disconnections. Setting a very
* short session timeout may result in frequent transitions between active and
   * standby states during issues like network outages or GC pauses.
*
* @param zookeeperHostPorts
* ZooKeeper hostPort for all ZooKeeper servers
* @param zookeeperSessionTimeout
* ZooKeeper session timeout
* @param parentZnodeName
* znode under which to create the lock
* @param acl
* ZooKeeper ACL's
* @param authInfo a list of authentication credentials to add to the
* ZK connection
* @param app
* reference to callback interface object
* @throws IOException
* @throws HadoopIllegalArgumentException
*/
public ActiveStandbyElector(String zookeeperHostPorts,
int zookeeperSessionTimeout, String parentZnodeName, List<ACL> acl,
List<ZKAuthInfo> authInfo,
ActiveStandbyElectorCallback app, int maxRetryNum) throws IOException,
HadoopIllegalArgumentException, KeeperException {
if (app == null || acl == null || parentZnodeName == null
|| zookeeperHostPorts == null || zookeeperSessionTimeout <= 0) {
throw new HadoopIllegalArgumentException("Invalid argument");
}
zkHostPort = zookeeperHostPorts;
zkSessionTimeout = zookeeperSessionTimeout;
zkAcl = acl;
zkAuthInfo = authInfo;
appClient = app;
znodeWorkingDir = parentZnodeName;
zkLockFilePath = znodeWorkingDir + "/" + LOCK_FILENAME;
zkBreadCrumbPath = znodeWorkingDir + "/" + BREADCRUMB_FILENAME;
this.maxRetryNum = maxRetryNum;
// createConnection for future API calls
createConnection();
}
/**
   * To participate in the election, the app calls joinElection. The result will
* be notified by a callback on either the becomeActive or becomeStandby app
* interfaces. <br/>
* After this the elector will automatically monitor the leader status and
* perform re-election if necessary<br/>
* The app could potentially start off in standby mode and ignore the
* becomeStandby call.
*
   * @param data
   *          the app data to be set in the election znode; must be non-null
* @throws HadoopIllegalArgumentException
* if valid data is not supplied
*/
public synchronized void joinElection(byte[] data)
throws HadoopIllegalArgumentException {
if (data == null) {
throw new HadoopIllegalArgumentException("data cannot be null");
}
if (wantToBeInElection) {
LOG.info("Already in election. Not re-connecting.");
return;
}
appData = new byte[data.length];
System.arraycopy(data, 0, appData, 0, data.length);
if (LOG.isDebugEnabled()) {
LOG.debug("Attempting active election for " + this);
}
joinElectionInternal();
}
/**
* @return true if the configured parent znode exists
*/
public synchronized boolean parentZNodeExists()
throws IOException, InterruptedException {
Preconditions.checkState(zkClient != null);
try {
return zkClient.exists(znodeWorkingDir, false) != null;
} catch (KeeperException e) {
throw new IOException("Couldn't determine existence of znode '" +
znodeWorkingDir + "'", e);
}
}
/**
* Utility function to ensure that the configured base znode exists.
* This recursively creates the znode as well as all of its parents.
*/
public synchronized void ensureParentZNode()
throws IOException, InterruptedException {
Preconditions.checkState(!wantToBeInElection,
"ensureParentZNode() may not be called while in the election");
String pathParts[] = znodeWorkingDir.split("/");
Preconditions.checkArgument(pathParts.length >= 1 &&
pathParts[0].isEmpty(),
"Invalid path: %s", znodeWorkingDir);
StringBuilder sb = new StringBuilder();
for (int i = 1; i < pathParts.length; i++) {
sb.append("/").append(pathParts[i]);
String prefixPath = sb.toString();
LOG.debug("Ensuring existence of " + prefixPath);
try {
createWithRetries(prefixPath, new byte[]{}, zkAcl, CreateMode.PERSISTENT);
} catch (KeeperException e) {
if (isNodeExists(e.code())) {
// This is OK - just ensuring existence.
continue;
} else {
throw new IOException("Couldn't create " + prefixPath, e);
}
}
}
LOG.info("Successfully created " + znodeWorkingDir + " in ZK.");
}
/**
* Clear all of the state held within the parent ZNode.
* This recursively deletes everything within the znode as well as the
* parent znode itself. It should only be used when it's certain that
* no electors are currently participating in the election.
*/
public synchronized void clearParentZNode()
throws IOException, InterruptedException {
Preconditions.checkState(!wantToBeInElection,
"clearParentZNode() may not be called while in the election");
try {
LOG.info("Recursively deleting " + znodeWorkingDir + " from ZK...");
zkDoWithRetries(new ZKAction<Void>() {
@Override
public Void run() throws KeeperException, InterruptedException {
ZKUtil.deleteRecursive(zkClient, znodeWorkingDir);
return null;
}
});
} catch (KeeperException e) {
throw new IOException("Couldn't clear parent znode " + znodeWorkingDir,
e);
}
LOG.info("Successfully deleted " + znodeWorkingDir + " from ZK.");
}
/**
* Any service instance can drop out of the election by calling quitElection.
* <br/>
* This will lose any leader status, if held, and stop monitoring of the lock
* node. <br/>
* If the instance wants to participate in election again, then it needs to
* call joinElection(). <br/>
* This allows service instances to take themselves out of rotation for known
* impending unavailable states (e.g. long GC pause or software upgrade).
*
* @param needFence true if the underlying daemon may need to be fenced
* if a failover occurs due to dropping out of the election.
*/
public synchronized void quitElection(boolean needFence) {
LOG.info("Yielding from election");
if (!needFence && state == State.ACTIVE) {
// If active is gracefully going back to standby mode, remove
// our permanent znode so no one fences us.
tryDeleteOwnBreadCrumbNode();
}
reset();
wantToBeInElection = false;
}
/**
* Exception thrown when there is no active leader
*/
public static class ActiveNotFoundException extends Exception {
private static final long serialVersionUID = 3505396722342846462L;
}
/**
* get data set by the active leader
*
* @return data set by the active instance
* @throws ActiveNotFoundException
* when there is no active leader
* @throws KeeperException
* other zookeeper operation errors
* @throws InterruptedException
* @throws IOException
* when ZooKeeper connection could not be established
*/
public synchronized byte[] getActiveData() throws ActiveNotFoundException,
KeeperException, InterruptedException, IOException {
try {
if (zkClient == null) {
createConnection();
}
Stat stat = new Stat();
return getDataWithRetries(zkLockFilePath, false, stat);
} catch(KeeperException e) {
Code code = e.code();
if (isNodeDoesNotExist(code)) {
// handle the commonly expected cases that make sense for us
throw new ActiveNotFoundException();
} else {
throw e;
}
}
}
/**
* interface implementation of Zookeeper callback for create
*/
@Override
public synchronized void processResult(int rc, String path, Object ctx,
String name) {
if (isStaleClient(ctx)) return;
if (LOG.isDebugEnabled()) {
LOG.debug("CreateNode result: " + rc + " for path: " + path
+ " connectionState: " + zkConnectionState +
" for " + this);
}
Code code = Code.get(rc);
if (isSuccess(code)) {
// we successfully created the znode. we are the leader. start monitoring
if (becomeActive()) {
monitorActiveStatus();
} else {
reJoinElectionAfterFailureToBecomeActive();
}
return;
}
if (isNodeExists(code)) {
if (createRetryCount == 0) {
// znode exists and we did not retry the operation. so a different
// instance has created it. become standby and monitor lock.
becomeStandby();
}
// if we had retried then the znode could have been created by our first
// attempt to the server (that we lost) and this node exists response is
// for the second attempt. verify this case via ephemeral node owner. this
// will happen on the callback for monitoring the lock.
monitorActiveStatus();
return;
}
String errorMessage = "Received create error from Zookeeper. code:"
+ code.toString() + " for path " + path;
LOG.debug(errorMessage);
if (shouldRetry(code)) {
if (createRetryCount < maxRetryNum) {
LOG.debug("Retrying createNode createRetryCount: " + createRetryCount);
++createRetryCount;
createLockNodeAsync();
return;
}
errorMessage = errorMessage
+ ". Not retrying further znode create connection errors.";
} else if (isSessionExpired(code)) {
// This isn't fatal - the client Watcher will re-join the election
LOG.warn("Lock acquisition failed because session was lost");
return;
}
fatalError(errorMessage);
}
/**
* interface implementation of Zookeeper callback for monitor (exists)
*/
@Override
public synchronized void processResult(int rc, String path, Object ctx,
Stat stat) {
if (isStaleClient(ctx)) return;
monitorLockNodePending = false;
assert wantToBeInElection :
"Got a StatNode result after quitting election";
if (LOG.isDebugEnabled()) {
LOG.debug("StatNode result: " + rc + " for path: " + path
+ " connectionState: " + zkConnectionState + " for " + this);
}
Code code = Code.get(rc);
if (isSuccess(code)) {
// the following owner check completes verification in case the lock znode
// creation was retried
if (stat.getEphemeralOwner() == zkClient.getSessionId()) {
// we own the lock znode. so we are the leader
if (!becomeActive()) {
reJoinElectionAfterFailureToBecomeActive();
}
} else {
        // we don't own the lock znode, so we are a standby.
becomeStandby();
}
// the watch set by us will notify about changes
return;
}
if (isNodeDoesNotExist(code)) {
// the lock znode disappeared before we started monitoring it
enterNeutralMode();
joinElectionInternal();
return;
}
String errorMessage = "Received stat error from Zookeeper. code:"
+ code.toString();
LOG.debug(errorMessage);
if (shouldRetry(code)) {
if (statRetryCount < maxRetryNum) {
++statRetryCount;
monitorLockNodeAsync();
return;
}
errorMessage = errorMessage
+ ". Not retrying further znode monitoring connection errors.";
} else if (isSessionExpired(code)) {
// This isn't fatal - the client Watcher will re-join the election
LOG.warn("Lock monitoring failed because session was lost");
return;
}
fatalError(errorMessage);
}
/**
   * We failed to become active. Re-join the election, but
   * sleep briefly after terminating our existing session, so
   * that other nodes have a chance to become active.
* The failure to become active is already logged inside
* becomeActive().
*/
private void reJoinElectionAfterFailureToBecomeActive() {
reJoinElection(SLEEP_AFTER_FAILURE_TO_BECOME_ACTIVE);
}
/**
* interface implementation of Zookeeper watch events (connection and node),
* proxied by {@link WatcherWithClientRef}.
*/
synchronized void processWatchEvent(ZooKeeper zk, WatchedEvent event) {
Event.EventType eventType = event.getType();
if (isStaleClient(zk)) return;
if (LOG.isDebugEnabled()) {
LOG.debug("Watcher event type: " + eventType + " with state:"
+ event.getState() + " for path:" + event.getPath()
+ " connectionState: " + zkConnectionState
+ " for " + this);
}
if (eventType == Event.EventType.None) {
// the connection state has changed
switch (event.getState()) {
case SyncConnected:
LOG.info("Session connected.");
// if the listener was asked to move to safe state then it needs to
// be undone
ConnectionState prevConnectionState = zkConnectionState;
zkConnectionState = ConnectionState.CONNECTED;
if (prevConnectionState == ConnectionState.DISCONNECTED &&
wantToBeInElection) {
monitorActiveStatus();
}
break;
case Disconnected:
LOG.info("Session disconnected. Entering neutral mode...");
// ask the app to move to safe state because zookeeper connection
        // is not active and we don't know our state
zkConnectionState = ConnectionState.DISCONNECTED;
enterNeutralMode();
break;
case Expired:
// the connection got terminated because of session timeout
// call listener to reconnect
LOG.info("Session expired. Entering neutral mode and rejoining...");
enterNeutralMode();
reJoinElection(0);
break;
case SaslAuthenticated:
LOG.info("Successfully authenticated to ZooKeeper using SASL.");
break;
default:
fatalError("Unexpected Zookeeper watch event state: "
+ event.getState());
break;
}
return;
}
// a watch on lock path in zookeeper has fired. so something has changed on
// the lock. ideally we should check that the path is the same as the lock
// path but trusting zookeeper for now
String path = event.getPath();
if (path != null) {
switch (eventType) {
case NodeDeleted:
if (state == State.ACTIVE) {
enterNeutralMode();
}
joinElectionInternal();
break;
case NodeDataChanged:
monitorActiveStatus();
break;
default:
if (LOG.isDebugEnabled()) {
LOG.debug("Unexpected node event: " + eventType + " for path: " + path);
}
monitorActiveStatus();
}
return;
}
// some unexpected error has occurred
fatalError("Unexpected watch error from Zookeeper");
}
/**
* Get a new zookeeper client instance. protected so that test class can
* inherit and pass in a mock object for zookeeper
*
* @return new zookeeper client instance
* @throws IOException
* @throws KeeperException zookeeper connectionloss exception
*/
protected synchronized ZooKeeper getNewZooKeeper() throws IOException,
KeeperException {
// Unfortunately, the ZooKeeper constructor connects to ZooKeeper and
// may trigger the Connected event immediately. So, if we register the
// watcher after constructing ZooKeeper, we may miss that event. Instead,
// we construct the watcher first, and have it block any events it receives
// before we can set its ZooKeeper reference.
watcher = new WatcherWithClientRef();
ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, watcher);
watcher.setZooKeeperRef(zk);
// Wait for the asynchronous success/failure. This may throw an exception
// if we don't connect within the session timeout.
watcher.waitForZKConnectionEvent(zkSessionTimeout);
for (ZKAuthInfo auth : zkAuthInfo) {
zk.addAuthInfo(auth.getScheme(), auth.getAuth());
}
return zk;
}
private void fatalError(String errorMessage) {
LOG.fatal(errorMessage);
reset();
appClient.notifyFatalError(errorMessage);
}
private void monitorActiveStatus() {
assert wantToBeInElection;
if (LOG.isDebugEnabled()) {
LOG.debug("Monitoring active leader for " + this);
}
statRetryCount = 0;
monitorLockNodeAsync();
}
private void joinElectionInternal() {
Preconditions.checkState(appData != null,
"trying to join election without any app data");
if (zkClient == null) {
if (!reEstablishSession()) {
fatalError("Failed to reEstablish connection with ZooKeeper");
return;
}
}
createRetryCount = 0;
wantToBeInElection = true;
createLockNodeAsync();
}
private void reJoinElection(int sleepTime) {
LOG.info("Trying to re-establish ZK session");
// Some of the test cases rely on expiring the ZK sessions and
// ensuring that the other node takes over. But, there's a race
// where the original lease holder could reconnect faster than the other
// thread manages to take the lock itself. This lock allows the
// tests to block the reconnection. It's a shame that this leaked
// into non-test code, but the lock is only acquired here so will never
// be contended.
sessionReestablishLockForTests.lock();
try {
terminateConnection();
sleepFor(sleepTime);
      // Don't join the election until the service has been reported
      // as HEALTHY by ZKFC health monitoring.
if (appData != null) {
joinElectionInternal();
} else {
LOG.info("Not joining election since service has not yet been " +
"reported as healthy.");
}
} finally {
sessionReestablishLockForTests.unlock();
}
}
/**
* Sleep for the given number of milliseconds.
* This is non-static, and separated out, so that unit tests
* can override the behavior not to sleep.
*/
@VisibleForTesting
protected void sleepFor(int sleepMs) {
if (sleepMs > 0) {
try {
Thread.sleep(sleepMs);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
}
@VisibleForTesting
void preventSessionReestablishmentForTests() {
sessionReestablishLockForTests.lock();
}
@VisibleForTesting
void allowSessionReestablishmentForTests() {
sessionReestablishLockForTests.unlock();
}
@VisibleForTesting
synchronized long getZKSessionIdForTests() {
if (zkClient != null) {
return zkClient.getSessionId();
} else {
return -1;
}
}
@VisibleForTesting
synchronized State getStateForTests() {
return state;
}
@VisibleForTesting
synchronized boolean isMonitorLockNodePending() {
return monitorLockNodePending;
}
private boolean reEstablishSession() {
int connectionRetryCount = 0;
boolean success = false;
while(!success && connectionRetryCount < maxRetryNum) {
if (LOG.isDebugEnabled()) {
LOG.debug("Establishing zookeeper connection for " + this);
}
try {
createConnection();
success = true;
} catch(IOException e) {
LOG.warn(e);
sleepFor(5000);
} catch(KeeperException e) {
LOG.warn(e);
sleepFor(5000);
}
++connectionRetryCount;
}
return success;
}
private void createConnection() throws IOException, KeeperException {
if (zkClient != null) {
try {
zkClient.close();
} catch (InterruptedException e) {
throw new IOException("Interrupted while closing ZK",
e);
}
zkClient = null;
watcher = null;
}
zkClient = getNewZooKeeper();
if (LOG.isDebugEnabled()) {
LOG.debug("Created new connection for " + this);
}
}
@InterfaceAudience.Private
public synchronized void terminateConnection() {
if (zkClient == null) {
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Terminating ZK connection for " + this);
}
ZooKeeper tempZk = zkClient;
zkClient = null;
watcher = null;
try {
tempZk.close();
} catch(InterruptedException e) {
LOG.warn(e);
}
zkConnectionState = ConnectionState.TERMINATED;
wantToBeInElection = false;
}
private void reset() {
state = State.INIT;
terminateConnection();
}
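  /**
   * Attempt to transition to active: fence any old active (via the
   * breadcrumb znode), write our own breadcrumb, and invoke the app's
   * becomeActive callback.
   * @return true if the transition succeeded, false otherwise
   */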
private boolean becomeActive() {
assert wantToBeInElection;
if (state == State.ACTIVE) {
// already active
return true;
}
try {
Stat oldBreadcrumbStat = fenceOldActive();
writeBreadCrumbNode(oldBreadcrumbStat);
if (LOG.isDebugEnabled()) {
LOG.debug("Becoming active for " + this);
}
appClient.becomeActive();
state = State.ACTIVE;
return true;
} catch (Exception e) {
LOG.warn("Exception handling the winning of election", e);
// Caller will handle quitting and rejoining the election.
return false;
}
}
/**
* Write the "ActiveBreadCrumb" node, indicating that this node may need
* to be fenced on failover.
   * @param oldBreadcrumbStat the Stat of any previous active's breadcrumb
   *        znode, or null if none existed
*/
private void writeBreadCrumbNode(Stat oldBreadcrumbStat)
throws KeeperException, InterruptedException {
Preconditions.checkState(appData != null, "no appdata");
LOG.info("Writing znode " + zkBreadCrumbPath +
" to indicate that the local node is the most recent active...");
if (oldBreadcrumbStat == null) {
// No previous active, just create the node
createWithRetries(zkBreadCrumbPath, appData, zkAcl,
CreateMode.PERSISTENT);
} else {
// There was a previous active, update the node
setDataWithRetries(zkBreadCrumbPath, appData, oldBreadcrumbStat.getVersion());
}
}
/**
* Try to delete the "ActiveBreadCrumb" node when gracefully giving up
* active status.
* If this fails, it will simply warn, since the graceful release behavior
* is only an optimization.
*/
private void tryDeleteOwnBreadCrumbNode() {
assert state == State.ACTIVE;
LOG.info("Deleting bread-crumb of active node...");
// Sanity check the data. This shouldn't be strictly necessary,
// but better to play it safe.
Stat stat = new Stat();
byte[] data = null;
try {
data = zkClient.getData(zkBreadCrumbPath, false, stat);
if (!Arrays.equals(data, appData)) {
throw new IllegalStateException(
"We thought we were active, but in fact " +
"the active znode had the wrong data: " +
StringUtils.byteToHexString(data) + " (stat=" + stat + ")");
}
deleteWithRetries(zkBreadCrumbPath, stat.getVersion());
} catch (Exception e) {
LOG.warn("Unable to delete our own bread-crumb of being active at " +
zkBreadCrumbPath + ": " + e.getLocalizedMessage() + ". " +
"Expecting to be fenced by the next active.");
}
}
/**
* If there is a breadcrumb node indicating that another node may need
* fencing, try to fence that node.
* @return the Stat of the breadcrumb node that was read, or null
* if no breadcrumb node existed
*/
private Stat fenceOldActive() throws InterruptedException, KeeperException {
final Stat stat = new Stat();
byte[] data;
LOG.info("Checking for any old active which needs to be fenced...");
try {
data = zkDoWithRetries(new ZKAction<byte[]>() {
@Override
public byte[] run() throws KeeperException, InterruptedException {
return zkClient.getData(zkBreadCrumbPath, false, stat);
}
});
} catch (KeeperException ke) {
if (isNodeDoesNotExist(ke.code())) {
LOG.info("No old node to fence");
return null;
}
// If we failed to read for any other reason, then likely we lost
// our session, or we don't have permissions, etc. In any case,
// we probably shouldn't become active, and failing the whole
// thing is the best bet.
throw ke;
}
LOG.info("Old node exists: " + StringUtils.byteToHexString(data));
if (Arrays.equals(data, appData)) {
LOG.info("But old node has our own data, so don't need to fence it.");
} else {
appClient.fenceOldActive(data);
}
return stat;
}
private void becomeStandby() {
if (state != State.STANDBY) {
if (LOG.isDebugEnabled()) {
LOG.debug("Becoming standby for " + this);
}
state = State.STANDBY;
appClient.becomeStandby();
}
}
private void enterNeutralMode() {
if (state != State.NEUTRAL) {
if (LOG.isDebugEnabled()) {
LOG.debug("Entering neutral mode for " + this);
}
state = State.NEUTRAL;
appClient.enterNeutralMode();
}
}
private void createLockNodeAsync() {
zkClient.create(zkLockFilePath, appData, zkAcl, CreateMode.EPHEMERAL,
this, zkClient);
}
private void monitorLockNodeAsync() {
if (monitorLockNodePending && monitorLockNodeClient == zkClient) {
LOG.info("Ignore duplicate monitor lock-node request.");
return;
}
monitorLockNodePending = true;
monitorLockNodeClient = zkClient;
zkClient.exists(zkLockFilePath,
watcher, this,
zkClient);
}
private String createWithRetries(final String path, final byte[] data,
final List<ACL> acl, final CreateMode mode)
throws InterruptedException, KeeperException {
return zkDoWithRetries(new ZKAction<String>() {
@Override
public String run() throws KeeperException, InterruptedException {
return zkClient.create(path, data, acl, mode);
}
});
}
private byte[] getDataWithRetries(final String path, final boolean watch,
final Stat stat) throws InterruptedException, KeeperException {
return zkDoWithRetries(new ZKAction<byte[]>() {
@Override
public byte[] run() throws KeeperException, InterruptedException {
return zkClient.getData(path, watch, stat);
}
});
}
private Stat setDataWithRetries(final String path, final byte[] data,
final int version) throws InterruptedException, KeeperException {
return zkDoWithRetries(new ZKAction<Stat>() {
@Override
public Stat run() throws KeeperException, InterruptedException {
return zkClient.setData(path, data, version);
}
});
}
private void deleteWithRetries(final String path, final int version)
throws KeeperException, InterruptedException {
zkDoWithRetries(new ZKAction<Void>() {
@Override
public Void run() throws KeeperException, InterruptedException {
zkClient.delete(path, version);
return null;
}
});
}
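  /**
   * Run the given action against ZooKeeper, retrying up to maxRetryNum
   * times on transient errors (connection loss or operation timeout).
   */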
private <T> T zkDoWithRetries(ZKAction<T> action) throws KeeperException,
InterruptedException {
int retry = 0;
while (true) {
try {
return action.run();
} catch (KeeperException ke) {
if (shouldRetry(ke.code()) && ++retry < maxRetryNum) {
continue;
}
throw ke;
}
}
}
private interface ZKAction<T> {
T run() throws KeeperException, InterruptedException;
}
/**
* The callbacks and watchers pass a reference to the ZK client
* which made the original call. We don't want to take action
* based on any callbacks from prior clients after we quit
* the election.
* @param ctx the ZK client passed into the watcher
* @return true if it matches the current client
*/
private synchronized boolean isStaleClient(Object ctx) {
Preconditions.checkNotNull(ctx);
if (zkClient != (ZooKeeper)ctx) {
LOG.warn("Ignoring stale result from old client with sessionId " +
String.format("0x%08x", ((ZooKeeper)ctx).getSessionId()));
return true;
}
return false;
}
/**
* Watcher implementation which keeps a reference around to the
* original ZK connection, and passes it back along with any
* events.
*/
private final class WatcherWithClientRef implements Watcher {
private ZooKeeper zk;
/**
* Latch fired whenever any event arrives. This is used in order
* to wait for the Connected event when the client is first created.
*/
private CountDownLatch hasReceivedEvent = new CountDownLatch(1);
/**
* Latch used to wait until the reference to ZooKeeper is set.
*/
private CountDownLatch hasSetZooKeeper = new CountDownLatch(1);
/**
* Waits for the next event from ZooKeeper to arrive.
*
* @param connectionTimeoutMs zookeeper connection timeout in milliseconds
* @throws KeeperException if the connection attempt times out. This will
* be a ZooKeeper ConnectionLoss exception code.
* @throws IOException if interrupted while connecting to ZooKeeper
*/
private void waitForZKConnectionEvent(int connectionTimeoutMs)
throws KeeperException, IOException {
try {
if (!hasReceivedEvent.await(connectionTimeoutMs, TimeUnit.MILLISECONDS)) {
LOG.error("Connection timed out: couldn't connect to ZooKeeper in "
+ connectionTimeoutMs + " milliseconds");
zk.close();
throw KeeperException.create(Code.CONNECTIONLOSS);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IOException(
"Interrupted when connecting to zookeeper server", e);
}
}
private void setZooKeeperRef(ZooKeeper zk) {
Preconditions.checkState(this.zk == null,
"zk already set -- must be set exactly once");
this.zk = zk;
hasSetZooKeeper.countDown();
}
@Override
public void process(WatchedEvent event) {
hasReceivedEvent.countDown();
try {
if (!hasSetZooKeeper.await(zkSessionTimeout, TimeUnit.MILLISECONDS)) {
LOG.debug("Event received with stale zk");
}
ActiveStandbyElector.this.processWatchEvent(
zk, event);
} catch (Throwable t) {
fatalError(
"Failed to process watcher event " + event + ": " +
StringUtils.stringifyException(t));
}
}
}
private static boolean isSuccess(Code code) {
return (code == Code.OK);
}
private static boolean isNodeExists(Code code) {
return (code == Code.NODEEXISTS);
}
private static boolean isNodeDoesNotExist(Code code) {
return (code == Code.NONODE);
}
private static boolean isSessionExpired(Code code) {
return (code == Code.SESSIONEXPIRED);
}
private static boolean shouldRetry(Code code) {
return code == Code.CONNECTIONLOSS || code == Code.OPERATIONTIMEOUT;
}
@Override
public String toString() {
return "elector id=" + System.identityHashCode(this) +
" appData=" +
((appData == null) ? "null" : StringUtils.byteToHexString(appData)) +
" cb=" + appClient;
}
}
| 39,017 | 33.076856 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCRpcServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.ZKFCProtocolService;
import org.apache.hadoop.ha.protocolPB.ZKFCProtocolPB;
import org.apache.hadoop.ha.protocolPB.ZKFCProtocolServerSideTranslatorPB;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RPC.Server;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.authorize.PolicyProvider;
import com.google.protobuf.BlockingService;
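/**
 * RPC server exposing the {@link ZKFCProtocol} operations of a
 * {@link ZKFailoverController}. Each operation checks RPC admin
 * access before delegating to the controller.
 */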
@InterfaceAudience.LimitedPrivate("HDFS")
@InterfaceStability.Evolving
public class ZKFCRpcServer implements ZKFCProtocol {
private static final int HANDLER_COUNT = 3;
private final ZKFailoverController zkfc;
private Server server;
ZKFCRpcServer(Configuration conf,
InetSocketAddress bindAddr,
ZKFailoverController zkfc,
PolicyProvider policy) throws IOException {
this.zkfc = zkfc;
RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
ProtobufRpcEngine.class);
ZKFCProtocolServerSideTranslatorPB translator =
new ZKFCProtocolServerSideTranslatorPB(this);
BlockingService service = ZKFCProtocolService
.newReflectiveBlockingService(translator);
this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
.setInstance(service).setBindAddress(bindAddr.getHostName())
.setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
.setVerbose(false).build();
// set service-level authorization security policy
if (conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
server.refreshServiceAcl(conf, policy);
}
}
void start() {
this.server.start();
}
public InetSocketAddress getAddress() {
return server.getListenerAddress();
}
void stopAndJoin() throws InterruptedException {
this.server.stop();
this.server.join();
}
@Override
public void cedeActive(int millisToCede) throws IOException,
AccessControlException {
zkfc.checkRpcAdminAccess();
zkfc.cedeActive(millisToCede);
}
@Override
public void gracefulFailover() throws IOException, AccessControlException {
zkfc.checkRpcAdminAccess();
zkfc.gracefulFailoverToYou();
}
}
| 3,370 | 33.397959 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configured;
import com.google.common.annotations.VisibleForTesting;
import com.jcraft.jsch.ChannelExec;
import com.jcraft.jsch.JSch;
import com.jcraft.jsch.JSchException;
import com.jcraft.jsch.Session;
/**
* This fencing implementation sshes to the target node and uses
* <code>fuser</code> to kill the process listening on the service's
* TCP port. This is more accurate than using "jps" since it doesn't
* require parsing, and will work even if there are multiple service
* processes running on the same machine.<p>
* It returns a successful status code if:
* <ul>
* <li><code>fuser</code> indicates it successfully killed a process, <em>or</em>
* <li><code>nc -z</code> indicates that nothing is listening on the target port
* </ul>
* <p>
 * This fencing mechanism is configured as follows in the fencing method
* list:
* <code>sshfence([[username][:ssh-port]])</code>
* where the optional argument specifies the username and port to use
* with ssh.
* <p>
* In order to achieve passwordless SSH, the operator must also configure
* <code>dfs.ha.fencing.ssh.private-key-files<code> to point to an
* SSH key that has passphrase-less access to the given username and host.
*/
public class SshFenceByTcpPort extends Configured
implements FenceMethod {
static final Log LOG = LogFactory.getLog(
SshFenceByTcpPort.class);
static final String CONF_CONNECT_TIMEOUT_KEY =
"dfs.ha.fencing.ssh.connect-timeout";
private static final int CONF_CONNECT_TIMEOUT_DEFAULT =
30*1000;
static final String CONF_IDENTITIES_KEY =
"dfs.ha.fencing.ssh.private-key-files";
/**
* Verify that the argument, if given, in the conf is parseable.
*/
@Override
public void checkArgs(String argStr) throws BadFencingConfigurationException {
if (argStr != null) {
new Args(argStr);
}
}
@Override
public boolean tryFence(HAServiceTarget target, String argsStr)
throws BadFencingConfigurationException {
Args args = new Args(argsStr);
InetSocketAddress serviceAddr = target.getAddress();
String host = serviceAddr.getHostName();
Session session;
try {
session = createSession(serviceAddr.getHostName(), args);
} catch (JSchException e) {
LOG.warn("Unable to create SSH session", e);
return false;
}
LOG.info("Connecting to " + host + "...");
try {
session.connect(getSshConnectTimeout());
} catch (JSchException e) {
LOG.warn("Unable to connect to " + host
+ " as user " + args.user, e);
return false;
}
LOG.info("Connected to " + host);
try {
return doFence(session, serviceAddr);
} catch (JSchException e) {
LOG.warn("Unable to achieve fencing on remote host", e);
return false;
} finally {
session.disconnect();
}
}
private Session createSession(String host, Args args) throws JSchException {
JSch jsch = new JSch();
for (String keyFile : getKeyFiles()) {
jsch.addIdentity(keyFile);
}
JSch.setLogger(new LogAdapter());
Session session = jsch.getSession(args.user, host, args.sshPort);
session.setConfig("StrictHostKeyChecking", "no");
return session;
}
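  /**
   * Carry out the fencing: use fuser to kill whatever is listening on the
   * service's TCP port and, if the result is indeterminate, verify with
   * "nc -z" that nothing is listening any more.
   */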
private boolean doFence(Session session, InetSocketAddress serviceAddr)
throws JSchException {
int port = serviceAddr.getPort();
try {
LOG.info("Looking for process running on port " + port);
int rc = execCommand(session,
"PATH=$PATH:/sbin:/usr/sbin fuser -v -k -n tcp " + port);
if (rc == 0) {
LOG.info("Successfully killed process that was " +
"listening on port " + port);
// exit code 0 indicates the process was successfully killed.
return true;
} else if (rc == 1) {
// exit code 1 indicates either that the process was not running
// or that fuser didn't have root privileges in order to find it
// (eg running as a different user)
LOG.info(
"Indeterminate response from trying to kill service. " +
"Verifying whether it is running using nc...");
rc = execCommand(session, "nc -z " + serviceAddr.getHostName() +
" " + serviceAddr.getPort());
if (rc == 0) {
// the service is still listening - we are unable to fence
LOG.warn("Unable to fence - it is running but we cannot kill it");
return false;
} else {
LOG.info("Verified that the service is down.");
return true;
}
      } else {
        // Any other exit code: log it and fall through; non-zero is failure.
        LOG.info("rc: " + rc);
      }
      return rc == 0;
} catch (InterruptedException e) {
LOG.warn("Interrupted while trying to fence via ssh", e);
return false;
} catch (IOException e) {
LOG.warn("Unknown failure while trying to fence via ssh", e);
return false;
}
}
/**
* Execute a command through the ssh session, pumping its
* stderr and stdout to our own logs.
*/
private int execCommand(Session session, String cmd)
throws JSchException, InterruptedException, IOException {
LOG.debug("Running cmd: " + cmd);
ChannelExec exec = null;
try {
exec = (ChannelExec)session.openChannel("exec");
exec.setCommand(cmd);
exec.setInputStream(null);
exec.connect();
// Pump stdout of the command to our WARN logs
StreamPumper outPumper = new StreamPumper(LOG, cmd + " via ssh",
exec.getInputStream(), StreamPumper.StreamType.STDOUT);
outPumper.start();
// Pump stderr of the command to our WARN logs
StreamPumper errPumper = new StreamPumper(LOG, cmd + " via ssh",
exec.getErrStream(), StreamPumper.StreamType.STDERR);
errPumper.start();
outPumper.join();
errPumper.join();
return exec.getExitStatus();
} finally {
cleanup(exec);
}
}
private static void cleanup(ChannelExec exec) {
if (exec != null) {
try {
exec.disconnect();
} catch (Throwable t) {
LOG.warn("Couldn't disconnect ssh channel", t);
}
}
}
private int getSshConnectTimeout() {
return getConf().getInt(
CONF_CONNECT_TIMEOUT_KEY, CONF_CONNECT_TIMEOUT_DEFAULT);
}
private Collection<String> getKeyFiles() {
return getConf().getTrimmedStringCollection(CONF_IDENTITIES_KEY);
}
/**
* Container for the parsed arg line for this fencing method.
*/
@VisibleForTesting
static class Args {
private static final Pattern USER_PORT_RE = Pattern.compile(
"([^:]+?)?(?:\\:(\\d+))?");
private static final int DEFAULT_SSH_PORT = 22;
String user;
int sshPort;
public Args(String arg)
throws BadFencingConfigurationException {
user = System.getProperty("user.name");
sshPort = DEFAULT_SSH_PORT;
// Parse optional user and ssh port
if (arg != null && !arg.isEmpty()) {
Matcher m = USER_PORT_RE.matcher(arg);
if (!m.matches()) {
throw new BadFencingConfigurationException(
"Unable to parse user and SSH port: "+ arg);
}
if (m.group(1) != null) {
user = m.group(1);
}
if (m.group(2) != null) {
sshPort = parseConfiggedPort(m.group(2));
}
}
}
private Integer parseConfiggedPort(String portStr)
throws BadFencingConfigurationException {
try {
return Integer.valueOf(portStr);
} catch (NumberFormatException nfe) {
throw new BadFencingConfigurationException(
"Port number '" + portStr + "' invalid");
}
}
}
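  // Examples of arg strings accepted by Args (user names and ports are
  // illustrative):
  //
  //   new Args(null);          // current user, port 22
  //   new Args("hauser");      // user "hauser", port 22
  //   new Args(":2222");       // current user, port 2222
  //   new Args("hauser:2222"); // user "hauser", port 2222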
  /**
   * Adapter from JSch's logger interface to Hadoop's commons-logging.
   */
private static class LogAdapter implements com.jcraft.jsch.Logger {
static final Log LOG = LogFactory.getLog(
SshFenceByTcpPort.class.getName() + ".jsch");
@Override
public boolean isEnabled(int level) {
switch (level) {
case com.jcraft.jsch.Logger.DEBUG:
return LOG.isDebugEnabled();
case com.jcraft.jsch.Logger.INFO:
return LOG.isInfoEnabled();
case com.jcraft.jsch.Logger.WARN:
return LOG.isWarnEnabled();
case com.jcraft.jsch.Logger.ERROR:
return LOG.isErrorEnabled();
case com.jcraft.jsch.Logger.FATAL:
return LOG.isFatalEnabled();
default:
return false;
}
}
@Override
public void log(int level, String message) {
switch (level) {
case com.jcraft.jsch.Logger.DEBUG:
LOG.debug(message);
break;
case com.jcraft.jsch.Logger.INFO:
LOG.info(message);
break;
case com.jcraft.jsch.Logger.WARN:
LOG.warn(message);
break;
case com.jcraft.jsch.Logger.ERROR:
LOG.error(message);
break;
case com.jcraft.jsch.Logger.FATAL:
LOG.fatal(message);
break;
default:
break;
}
}
}
}
| 10,066 | 30.557994 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFCProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.retry.Idempotent;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.KerberosInfo;
import java.io.IOException;
/**
* Protocol exposed by the ZKFailoverController, allowing for graceful
* failover.
*/
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface ZKFCProtocol {
/**
* Initial version of the protocol
*/
public static final long versionID = 1L;
/**
* Request that this service yield from the active node election for the
* specified time period.
*
* If the node is not currently active, it simply prevents any attempts
* to become active for the specified time period. Otherwise, it first
* tries to transition the local service to standby state, and then quits
* the election.
*
* If the attempt to transition to standby succeeds, then the ZKFC receiving
* this RPC will delete its own breadcrumb node in ZooKeeper. Thus, the
* next node to become active will not run any fencing process. Otherwise,
* the breadcrumb will be left, such that the next active will fence this
* node.
*
* After the specified time period elapses, the node will attempt to re-join
* the election, provided that its service is healthy.
*
* If the node has previously been instructed to cede active, and is still
* within the specified time period, the later command's time period will
* take precedence, resetting the timer.
*
* A call to cedeActive which specifies a 0 or negative time period will
* allow the target node to immediately rejoin the election, so long as
* it is healthy.
*
* @param millisToCede period for which the node should not attempt to
* become active
* @throws IOException if the operation fails
* @throws AccessControlException if the operation is disallowed
*/
@Idempotent
public void cedeActive(int millisToCede)
throws IOException, AccessControlException;
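  // Illustrative client-side sketch (assuming "zkfc" is a ZKFCProtocol
  // proxy, e.g. a ZKFCProtocolClientSideTranslatorPB): keep the local node
  // out of the election for ten seconds, or rejoin immediately:
  //
  //   zkfc.cedeActive(10000); // yield active state, retry after 10s
  //   zkfc.cedeActive(0);     // rejoin the election right away, if healthy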
/**
* Request that this node try to become active through a graceful failover.
*
* If the node is already active, this is a no-op and simply returns success
* without taking any further action.
*
* If the node is not healthy, it will throw an exception indicating that it
* is not able to become active.
*
* If the node is healthy and not active, it will try to initiate a graceful
* failover to become active, returning only when it has successfully become
* active. See {@link ZKFailoverController#gracefulFailoverToYou()} for the
* implementation details.
*
* If the node fails to successfully coordinate the failover, throws an
* exception indicating the reason for failure.
*
* @throws IOException if graceful failover fails
* @throws AccessControlException if the operation is disallowed
*/
@Idempotent
public void gracefulFailover()
throws IOException, AccessControlException;
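  // Illustrative sketch: a graceful failover is requested on the ZKFC of
  // the node that should become active (the proxy name is hypothetical):
  //
  //   targetZkfc.gracefulFailover(); // blocks until this node is active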
}
| 4,052 | 38.735294 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import javax.net.SocketFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HARequestSource;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
* This class is the client side translator to translate the requests made on
* {@link HAServiceProtocol} interfaces to the RPC server implementing
* {@link HAServiceProtocolPB}.
*/
@InterfaceAudience.Private
@InterfaceStability.Stable
public class HAServiceProtocolClientSideTranslatorPB implements
HAServiceProtocol, Closeable, ProtocolTranslator {
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final static MonitorHealthRequestProto MONITOR_HEALTH_REQ =
MonitorHealthRequestProto.newBuilder().build();
private final static GetServiceStatusRequestProto GET_SERVICE_STATUS_REQ =
GetServiceStatusRequestProto.newBuilder().build();
private final HAServiceProtocolPB rpcProxy;
public HAServiceProtocolClientSideTranslatorPB(InetSocketAddress addr,
Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
ProtobufRpcEngine.class);
rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, conf);
}
public HAServiceProtocolClientSideTranslatorPB(
InetSocketAddress addr, Configuration conf,
SocketFactory socketFactory, int timeout) throws IOException {
RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
ProtobufRpcEngine.class);
rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
RPC.getProtocolVersion(HAServiceProtocolPB.class), addr,
UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
}
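  // Illustrative usage sketch (address and configuration are hypothetical):
  //
  //   HAServiceProtocol proxy = new HAServiceProtocolClientSideTranslatorPB(
  //       new InetSocketAddress("nn1.example.com", 8020), new Configuration());
  //   proxy.monitorHealth();                           // throws if unhealthy
  //   HAServiceStatus status = proxy.getServiceStatus();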
@Override
public void monitorHealth() throws IOException {
try {
rpcProxy.monitorHealth(NULL_CONTROLLER, MONITOR_HEALTH_REQ);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void transitionToActive(StateChangeRequestInfo reqInfo) throws IOException {
try {
TransitionToActiveRequestProto req =
TransitionToActiveRequestProto.newBuilder()
.setReqInfo(convert(reqInfo)).build();
rpcProxy.transitionToActive(NULL_CONTROLLER, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void transitionToStandby(StateChangeRequestInfo reqInfo) throws IOException {
try {
TransitionToStandbyRequestProto req =
TransitionToStandbyRequestProto.newBuilder()
.setReqInfo(convert(reqInfo)).build();
rpcProxy.transitionToStandby(NULL_CONTROLLER, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public HAServiceStatus getServiceStatus() throws IOException {
GetServiceStatusResponseProto status;
try {
status = rpcProxy.getServiceStatus(NULL_CONTROLLER,
GET_SERVICE_STATUS_REQ);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
HAServiceStatus ret = new HAServiceStatus(
convert(status.getState()));
if (status.getReadyToBecomeActive()) {
ret.setReadyToBecomeActive();
} else {
ret.setNotReadyToBecomeActive(status.getNotReadyReason());
}
return ret;
}
private HAServiceState convert(HAServiceStateProto state) {
switch(state) {
case ACTIVE:
return HAServiceState.ACTIVE;
case STANDBY:
return HAServiceState.STANDBY;
case INITIALIZING:
default:
return HAServiceState.INITIALIZING;
}
}
private HAStateChangeRequestInfoProto convert(StateChangeRequestInfo reqInfo) {
HARequestSource src;
switch (reqInfo.getSource()) {
case REQUEST_BY_USER:
src = HARequestSource.REQUEST_BY_USER;
break;
case REQUEST_BY_USER_FORCED:
src = HARequestSource.REQUEST_BY_USER_FORCED;
break;
case REQUEST_BY_ZKFC:
src = HARequestSource.REQUEST_BY_ZKFC;
break;
default:
throw new IllegalArgumentException("Bad source: " + reqInfo.getSource());
}
return HAStateChangeRequestInfoProto.newBuilder()
.setReqSource(src)
.build();
}
@Override
public void close() {
RPC.stopProxy(rpcProxy);
}
@Override
public Object getUnderlyingProxyObject() {
return rpcProxy;
}
}
| 6,530 | 35.082873 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.ZKFCProtocolService;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@ProtocolInfo(protocolName = "org.apache.hadoop.ha.ZKFCProtocol",
protocolVersion = 1)
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface ZKFCProtocolPB extends
ZKFCProtocolService.BlockingInterface, VersionedProtocol {
  /**
   * If any methods need annotation, they can be added here.
   */
}
| 1,654 | 41.435897 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
@KerberosInfo(
serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@ProtocolInfo(protocolName = "org.apache.hadoop.ha.HAServiceProtocol",
protocolVersion = 1)
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface HAServiceProtocolPB extends
HAServiceProtocolService.BlockingInterface, VersionedProtocol {
  /**
   * If any methods need annotation, they can be added here.
   */
}
| 1,680 | 41.025 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolClientSideTranslatorPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import javax.net.SocketFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.ZKFCProtocol;
import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveRequestProto;
import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverRequestProto;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
public class ZKFCProtocolClientSideTranslatorPB implements
ZKFCProtocol, Closeable, ProtocolTranslator {
private final static RpcController NULL_CONTROLLER = null;
private final ZKFCProtocolPB rpcProxy;
public ZKFCProtocolClientSideTranslatorPB(
InetSocketAddress addr, Configuration conf,
SocketFactory socketFactory, int timeout) throws IOException {
RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
ProtobufRpcEngine.class);
rpcProxy = RPC.getProxy(ZKFCProtocolPB.class,
RPC.getProtocolVersion(ZKFCProtocolPB.class), addr,
UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
}
@Override
public void cedeActive(int millisToCede) throws IOException,
AccessControlException {
try {
CedeActiveRequestProto req = CedeActiveRequestProto.newBuilder()
.setMillisToCede(millisToCede)
.build();
rpcProxy.cedeActive(NULL_CONTROLLER, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void gracefulFailover() throws IOException, AccessControlException {
try {
rpcProxy.gracefulFailover(NULL_CONTROLLER,
GracefulFailoverRequestProto.getDefaultInstance());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void close() {
RPC.stopProxy(rpcProxy);
}
@Override
public Object getUnderlyingProxyObject() {
return rpcProxy;
}
}
| 3,116 | 33.252747 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha.protocolPB;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAStateChangeRequestInfoProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
 * This class is used on the server side. Calls come across the wire for
 * the protocol {@link HAServiceProtocolPB}. This class translates the PB
 * data types to the native data types used inside the server, as specified
 * in {@link HAServiceProtocol}.
 */
@InterfaceAudience.Private
@InterfaceStability.Stable
public class HAServiceProtocolServerSideTranslatorPB implements
HAServiceProtocolPB {
private final HAServiceProtocol server;
private static final MonitorHealthResponseProto MONITOR_HEALTH_RESP =
MonitorHealthResponseProto.newBuilder().build();
private static final TransitionToActiveResponseProto TRANSITION_TO_ACTIVE_RESP =
TransitionToActiveResponseProto.newBuilder().build();
private static final TransitionToStandbyResponseProto TRANSITION_TO_STANDBY_RESP =
TransitionToStandbyResponseProto.newBuilder().build();
private static final Log LOG = LogFactory.getLog(
HAServiceProtocolServerSideTranslatorPB.class);
public HAServiceProtocolServerSideTranslatorPB(HAServiceProtocol server) {
this.server = server;
}
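  // Illustrative server wiring sketch (assumes an RPC.Builder-based server
  // and a local HAServiceProtocol implementation named "haService"; the
  // variable names are hypothetical):
  //
  //   BlockingService svc = HAServiceProtocolService
  //       .newReflectiveBlockingService(
  //           new HAServiceProtocolServerSideTranslatorPB(haService));
  //   Server server = new RPC.Builder(conf)
  //       .setProtocol(HAServiceProtocolPB.class).setInstance(svc)
  //       .setBindAddress(bindAddr).setPort(port).build();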
@Override
public MonitorHealthResponseProto monitorHealth(RpcController controller,
MonitorHealthRequestProto request) throws ServiceException {
try {
server.monitorHealth();
return MONITOR_HEALTH_RESP;
} catch(IOException e) {
throw new ServiceException(e);
}
}
private StateChangeRequestInfo convert(HAStateChangeRequestInfoProto proto) {
RequestSource src;
switch (proto.getReqSource()) {
case REQUEST_BY_USER:
src = RequestSource.REQUEST_BY_USER;
break;
case REQUEST_BY_USER_FORCED:
src = RequestSource.REQUEST_BY_USER_FORCED;
break;
case REQUEST_BY_ZKFC:
src = RequestSource.REQUEST_BY_ZKFC;
break;
default:
LOG.warn("Unknown request source: " + proto.getReqSource());
src = null;
}
return new StateChangeRequestInfo(src);
}
@Override
public TransitionToActiveResponseProto transitionToActive(
RpcController controller, TransitionToActiveRequestProto request)
throws ServiceException {
try {
server.transitionToActive(convert(request.getReqInfo()));
return TRANSITION_TO_ACTIVE_RESP;
} catch(IOException e) {
throw new ServiceException(e);
}
}
@Override
public TransitionToStandbyResponseProto transitionToStandby(
RpcController controller, TransitionToStandbyRequestProto request)
throws ServiceException {
try {
server.transitionToStandby(convert(request.getReqInfo()));
return TRANSITION_TO_STANDBY_RESP;
} catch(IOException e) {
throw new ServiceException(e);
}
}
@Override
public GetServiceStatusResponseProto getServiceStatus(RpcController controller,
GetServiceStatusRequestProto request) throws ServiceException {
HAServiceStatus s;
try {
s = server.getServiceStatus();
} catch(IOException e) {
throw new ServiceException(e);
}
HAServiceStateProto retState;
switch (s.getState()) {
case ACTIVE:
retState = HAServiceStateProto.ACTIVE;
break;
case STANDBY:
retState = HAServiceStateProto.STANDBY;
break;
case INITIALIZING:
default:
retState = HAServiceStateProto.INITIALIZING;
break;
}
GetServiceStatusResponseProto.Builder ret =
GetServiceStatusResponseProto.newBuilder()
.setState(retState)
.setReadyToBecomeActive(s.isReadyToBecomeActive());
if (!s.isReadyToBecomeActive()) {
ret.setNotReadyReason(s.getNotReadyReason());
}
return ret.build();
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return RPC.getProtocolVersion(HAServiceProtocolPB.class);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
if (!protocol.equals(RPC.getProtocolName(HAServiceProtocolPB.class))) {
throw new IOException("Serverside implements " +
RPC.getProtocolName(HAServiceProtocolPB.class) +
". The following requested protocol is unknown: " + protocol);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
RPC.getProtocolVersion(HAServiceProtocolPB.class),
HAServiceProtocolPB.class);
}
}
| 6,760 | 36.561111 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/ZKFCProtocolServerSideTranslatorPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ha.protocolPB;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.ha.ZKFCProtocol;
import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveRequestProto;
import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.CedeActiveResponseProto;
import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverRequestProto;
import org.apache.hadoop.ha.proto.ZKFCProtocolProtos.GracefulFailoverResponseProto;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@InterfaceAudience.Private
@InterfaceStability.Stable
public class ZKFCProtocolServerSideTranslatorPB implements
ZKFCProtocolPB {
private final ZKFCProtocol server;
public ZKFCProtocolServerSideTranslatorPB(ZKFCProtocol server) {
this.server = server;
}
@Override
public CedeActiveResponseProto cedeActive(RpcController controller,
CedeActiveRequestProto request) throws ServiceException {
try {
server.cedeActive(request.getMillisToCede());
return CedeActiveResponseProto.getDefaultInstance();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public GracefulFailoverResponseProto gracefulFailover(
RpcController controller, GracefulFailoverRequestProto request)
throws ServiceException {
try {
server.gracefulFailover();
return GracefulFailoverResponseProto.getDefaultInstance();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return RPC.getProtocolVersion(ZKFCProtocolPB.class);
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
if (!protocol.equals(RPC.getProtocolName(ZKFCProtocolPB.class))) {
throw new IOException("Serverside implements " +
RPC.getProtocolName(ZKFCProtocolPB.class) +
". The following requested protocol is unknown: " + protocol);
}
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
RPC.getProtocolVersion(ZKFCProtocolPB.class),
        ZKFCProtocolPB.class);
}
}
| 3,250 | 35.52809 | 83 |
java
|