Dataset columns:
  repo             string   (length 1 to 191)
  file             string   (length 23 to 351)
  code             string   (length 0 to 5.32M)
  file_length      int64    (0 to 5.32M)
  avg_line_length  float64  (0 to 2.9k)
  max_line_length  int64    (0 to 288k)
  extension_type   string   (1 distinct value)
repo: hadoop
file: hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/CyclicIteration.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.util.Iterator; import java.util.Map; import java.util.NavigableMap; import java.util.NoSuchElementException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** Provide an cyclic {@link Iterator} for a {@link NavigableMap}. * The {@link Iterator} navigates the entries of the map * according to the map's ordering. * If the {@link Iterator} hits the last entry of the map, * it will then continue from the first entry. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class CyclicIteration<K, V> implements Iterable<Map.Entry<K, V>> { private final NavigableMap<K, V> navigablemap; private final NavigableMap<K, V> tailmap; /** Construct an {@link Iterable} object, * so that an {@link Iterator} can be created * for iterating the given {@link NavigableMap}. * The iteration begins from the starting key exclusively. */ public CyclicIteration(NavigableMap<K, V> navigablemap, K startingkey) { if (navigablemap == null || navigablemap.isEmpty()) { this.navigablemap = null; this.tailmap = null; } else { this.navigablemap = navigablemap; this.tailmap = navigablemap.tailMap(startingkey, false); } } @Override public Iterator<Map.Entry<K, V>> iterator() { return new CyclicIterator(); } /** An {@link Iterator} for {@link CyclicIteration}. */ private class CyclicIterator implements Iterator<Map.Entry<K, V>> { private boolean hasnext; private Iterator<Map.Entry<K, V>> i; /** The first entry to begin. */ private final Map.Entry<K, V> first; /** The next entry. */ private Map.Entry<K, V> next; private CyclicIterator() { hasnext = navigablemap != null; if (hasnext) { i = tailmap.entrySet().iterator(); first = nextEntry(); next = first; } else { i = null; first = null; next = null; } } private Map.Entry<K, V> nextEntry() { if (!i.hasNext()) { i = navigablemap.entrySet().iterator(); } return i.next(); } @Override public boolean hasNext() { return hasnext; } @Override public Map.Entry<K, V> next() { if (!hasnext) { throw new NoSuchElementException(); } final Map.Entry<K, V> curr = next; next = nextEntry(); hasnext = !next.equals(first); return curr; } /** Not supported */ @Override public void remove() { throw new UnsupportedOperationException("Not supported"); } } }
file_length: 3,463 | avg_line_length: 29.121739 | max_line_length: 75 | extension_type: java
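A usage sketch for the CyclicIteration class in the row above. The sketch is illustrative only: the example class name, map contents, and starting key are assumptions, not part of the dataset.

import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hdfs.util.CyclicIteration;

public class CyclicIterationExample {
  public static void main(String[] args) {
    // A NavigableMap with three entries.
    TreeMap<String, Integer> map = new TreeMap<>();
    map.put("a", 1);
    map.put("b", 2);
    map.put("c", 3);
    // Iteration starts after the key "b" (exclusive) and wraps around:
    // prints c=3, a=1, b=2.
    for (Map.Entry<String, Integer> e : new CyclicIteration<>(map, "b")) {
      System.out.println(e.getKey() + "=" + e.getValue());
    }
  }
}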
repo: hadoop
file: hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import static org.apache.hadoop.util.Time.monotonicNow; /** * a class to throttle the data transfers. * This class is thread safe. It can be shared by multiple threads. * The parameter bandwidthPerSec specifies the total bandwidth shared by * threads. */ public class DataTransferThrottler { private final long period; // period over which bw is imposed private final long periodExtension; // Max period over which bw accumulates. private long bytesPerPeriod; // total number of bytes can be sent in each period private long curPeriodStart; // current period starting time private long curReserve; // remaining bytes can be sent in the period private long bytesAlreadyUsed; /** Constructor * @param bandwidthPerSec bandwidth allowed in bytes per second. */ public DataTransferThrottler(long bandwidthPerSec) { this(500, bandwidthPerSec); // by default throttling period is 500ms } /** * Constructor * @param period in milliseconds. Bandwidth is enforced over this * period. * @param bandwidthPerSec bandwidth allowed in bytes per second. */ public DataTransferThrottler(long period, long bandwidthPerSec) { this.curPeriodStart = monotonicNow(); this.period = period; this.curReserve = this.bytesPerPeriod = bandwidthPerSec*period/1000; this.periodExtension = period*3; } /** * @return current throttle bandwidth in bytes per second. */ public synchronized long getBandwidth() { return bytesPerPeriod*1000/period; } /** * Sets throttle bandwidth. This takes affect latest by the end of current * period. */ public synchronized void setBandwidth(long bytesPerSecond) { if ( bytesPerSecond <= 0 ) { throw new IllegalArgumentException("" + bytesPerSecond); } bytesPerPeriod = bytesPerSecond*period/1000; } /** Given the numOfBytes sent/received since last time throttle was called, * make the current thread sleep if I/O rate is too fast * compared to the given bandwidth. * * @param numOfBytes * number of bytes sent/received since last time throttle was called */ public synchronized void throttle(long numOfBytes) { throttle(numOfBytes, null); } /** Given the numOfBytes sent/received since last time throttle was called, * make the current thread sleep if I/O rate is too fast * compared to the given bandwidth. Allows for optional external cancelation. 
* * @param numOfBytes * number of bytes sent/received since last time throttle was called * @param canceler * optional canceler to check for abort of throttle */ public synchronized void throttle(long numOfBytes, Canceler canceler) { if ( numOfBytes <= 0 ) { return; } curReserve -= numOfBytes; bytesAlreadyUsed += numOfBytes; while (curReserve <= 0) { if (canceler != null && canceler.isCancelled()) { return; } long now = monotonicNow(); long curPeriodEnd = curPeriodStart + period; if ( now < curPeriodEnd ) { // Wait for next period so that curReserve can be increased. try { wait( curPeriodEnd - now ); } catch (InterruptedException e) { // Abort throttle and reset interrupted status to make sure other // interrupt handling higher in the call stack executes. Thread.currentThread().interrupt(); break; } } else if ( now < (curPeriodStart + periodExtension)) { curPeriodStart = curPeriodEnd; curReserve += bytesPerPeriod; } else { // discard the prev period. Throttler might not have // been used for a long time. curPeriodStart = now; curReserve = bytesPerPeriod - bytesAlreadyUsed; } } bytesAlreadyUsed -= numOfBytes; } }
file_length: 4,684 | avg_line_length: 34.225564 | max_line_length: 83 | extension_type: java
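A hedged sketch of how the DataTransferThrottler above could be driven from a copy loop; the class name, stream parameters, buffer size, and bandwidth value are assumptions for illustration.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;

public class ThrottledCopy {
  public static void copy(InputStream in, OutputStream out) throws IOException {
    // Budget of roughly 1 MB per second, enforced over 500 ms periods by default.
    DataTransferThrottler throttler = new DataTransferThrottler(1024 * 1024);
    byte[] buf = new byte[64 * 1024];
    int n;
    while ((n = in.read(buf)) > 0) {
      out.write(buf, 0, n);
      // Report the bytes just transferred; this sleeps if the loop is ahead of budget.
      throttler.throttle(n);
    }
  }
}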
repo: hadoop
file: hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReadOnlyList.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.ListIterator; import org.apache.hadoop.classification.InterfaceAudience; /** * A {@link ReadOnlyList} is a unmodifiable list, * which supports read-only operations. * * @param <E> The type of the list elements. */ @InterfaceAudience.Private public interface ReadOnlyList<E> extends Iterable<E> { /** * Is this an empty list? */ boolean isEmpty(); /** * @return the size of this list. */ int size(); /** * @return the i-th element. */ E get(int i); /** * Utilities for {@link ReadOnlyList} */ public static class Util { /** @return an empty list. */ public static <E> ReadOnlyList<E> emptyList() { return ReadOnlyList.Util.asReadOnlyList(Collections.<E>emptyList()); } /** * The same as {@link Collections#binarySearch(List, Object)} * except that the list is a {@link ReadOnlyList}. * * @return the insertion point defined * in {@link Collections#binarySearch(List, Object)}. */ public static <K, E extends Comparable<K>> int binarySearch( final ReadOnlyList<E> list, final K key) { int lower = 0; for(int upper = list.size() - 1; lower <= upper; ) { final int mid = (upper + lower) >>> 1; final int d = list.get(mid).compareTo(key); if (d == 0) { return mid; } else if (d > 0) { upper = mid - 1; } else { lower = mid + 1; } } return -(lower + 1); } /** * @return a {@link ReadOnlyList} view of the given list. */ public static <E> ReadOnlyList<E> asReadOnlyList(final List<E> list) { return new ReadOnlyList<E>() { @Override public Iterator<E> iterator() { return list.iterator(); } @Override public boolean isEmpty() { return list.isEmpty(); } @Override public int size() { return list.size(); } @Override public E get(int i) { return list.get(i); } }; } /** * @return a {@link List} view of the given list. */ public static <E> List<E> asList(final ReadOnlyList<E> list) { return new List<E>() { @Override public Iterator<E> iterator() { return list.iterator(); } @Override public boolean isEmpty() { return list.isEmpty(); } @Override public int size() { return list.size(); } @Override public E get(int i) { return list.get(i); } @Override public Object[] toArray() { final Object[] a = new Object[size()]; for(int i = 0; i < a.length; i++) { a[i] = get(i); } return a; } //All methods below are not supported. @Override public boolean add(E e) { throw new UnsupportedOperationException(); } @Override public void add(int index, E element) { throw new UnsupportedOperationException(); } @Override public boolean addAll(Collection<? extends E> c) { throw new UnsupportedOperationException(); } @Override public boolean addAll(int index, Collection<? 
extends E> c) { throw new UnsupportedOperationException(); } @Override public void clear() { throw new UnsupportedOperationException(); } @Override public boolean contains(Object o) { throw new UnsupportedOperationException(); } @Override public boolean containsAll(Collection<?> c) { throw new UnsupportedOperationException(); } @Override public int indexOf(Object o) { throw new UnsupportedOperationException(); } @Override public int lastIndexOf(Object o) { throw new UnsupportedOperationException(); } @Override public ListIterator<E> listIterator() { throw new UnsupportedOperationException(); } @Override public ListIterator<E> listIterator(int index) { throw new UnsupportedOperationException(); } @Override public boolean remove(Object o) { throw new UnsupportedOperationException(); } @Override public E remove(int index) { throw new UnsupportedOperationException(); } @Override public boolean removeAll(Collection<?> c) { throw new UnsupportedOperationException(); } @Override public boolean retainAll(Collection<?> c) { throw new UnsupportedOperationException(); } @Override public E set(int index, E element) { throw new UnsupportedOperationException(); } @Override public List<E> subList(int fromIndex, int toIndex) { throw new UnsupportedOperationException(); } @Override public <T> T[] toArray(T[] a) { throw new UnsupportedOperationException(); } }; } } }
file_length: 6,196 | avg_line_length: 24.713693 | max_line_length: 75 | extension_type: java
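A usage sketch for ReadOnlyList and its Util helpers from the row above; the element values and class name are illustrative assumptions.

import java.util.Arrays;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

public class ReadOnlyListExample {
  public static void main(String[] args) {
    // Wrap a sorted java.util.List as a read-only view.
    ReadOnlyList<String> names =
        ReadOnlyList.Util.asReadOnlyList(Arrays.asList("alpha", "beta", "gamma"));
    System.out.println(names.size());                                  // 3
    System.out.println(ReadOnlyList.Util.binarySearch(names, "beta")); // 1
    // Mutating the List view throws:
    // ReadOnlyList.Util.asList(names).add("delta") -> UnsupportedOperationException
  }
}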
repo: hadoop
file: hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.io.EOFException; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import com.google.common.base.Preconditions; /** * An InputStream implementations which reads from some other InputStream * but expects an exact number of bytes. Any attempts to read past the * specified number of bytes will return as if the end of the stream * was reached. If the end of the underlying stream is reached prior to * the specified number of bytes, an EOFException is thrown. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class ExactSizeInputStream extends FilterInputStream { private int remaining; /** * Construct an input stream that will read no more than * 'numBytes' bytes. * * If an EOF occurs on the underlying stream before numBytes * bytes have been read, an EOFException will be thrown. * * @param in the inputstream to wrap * @param numBytes the number of bytes to read */ public ExactSizeInputStream(InputStream in, int numBytes) { super(in); Preconditions.checkArgument(numBytes >= 0, "Negative expected bytes: ", numBytes); this.remaining = numBytes; } @Override public int available() throws IOException { return Math.min(super.available(), remaining); } @Override public int read() throws IOException { // EOF if we reached our limit if (remaining <= 0) { return -1; } final int result = super.read(); if (result >= 0) { --remaining; } else if (remaining > 0) { // Underlying stream reached EOF but we haven't read the expected // number of bytes. throw new EOFException( "Premature EOF. Expected " + remaining + "more bytes"); } return result; } @Override public int read(final byte[] b, final int off, int len) throws IOException { if (remaining <= 0) { return -1; } len = Math.min(len, remaining); final int result = super.read(b, off, len); if (result >= 0) { remaining -= result; } else if (remaining > 0) { // Underlying stream reached EOF but we haven't read the expected // number of bytes. throw new EOFException( "Premature EOF. Expected " + remaining + "more bytes"); } return result; } @Override public long skip(final long n) throws IOException { final long result = super.skip(Math.min(n, remaining)); if (result > 0) { remaining -= result; } else if (remaining > 0) { // Underlying stream reached EOF but we haven't read the expected // number of bytes. throw new EOFException( "Premature EOF. Expected " + remaining + "more bytes"); } return result; } @Override public boolean markSupported() { return false; } @Override public void mark(int readlimit) { throw new UnsupportedOperationException(); } }
file_length: 3,894 | avg_line_length: 30.16 | max_line_length: 75 | extension_type: java
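A sketch of reading a fixed-size record through the ExactSizeInputStream above; the payload and record length are made-up values for illustration.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hdfs.util.ExactSizeInputStream;

public class ExactSizeExample {
  public static void main(String[] args) throws IOException {
    byte[] payload = "hello world".getBytes(StandardCharsets.UTF_8);
    // Only the first 5 bytes are visible through the wrapper.
    ExactSizeInputStream in =
        new ExactSizeInputStream(new ByteArrayInputStream(payload), 5);
    byte[] record = new byte[5];
    int read = in.read(record, 0, record.length); // reads "hello"
    System.out.println(read);      // 5
    System.out.println(in.read()); // -1: the byte limit acts as end-of-stream
  }
}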
repo: hadoop
file: hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.io.PrintStream; import java.util.ArrayList; import java.util.Collection; import java.util.ConcurrentModificationException; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.NoSuchElementException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** * A low memory linked hash set implementation, which uses an array for storing * the elements and linked lists for collision resolution. This class does not * support null element. * * This class is not thread safe. * */ public class LightWeightHashSet<T> implements Collection<T> { /** * Elements of {@link LightWeightLinkedSet}. */ static class LinkedElement<T> { protected final T element; // reference to the next entry within a bucket linked list protected LinkedElement<T> next; //hashCode of the element protected final int hashCode; public LinkedElement(T elem, int hash) { this.element = elem; this.next = null; this.hashCode = hash; } @Override public String toString() { return element.toString(); } } protected static final float DEFAULT_MAX_LOAD_FACTOR = 0.75f; protected static final float DEFAUT_MIN_LOAD_FACTOR = 0.2f; protected static final int MINIMUM_CAPACITY = 16; static final int MAXIMUM_CAPACITY = 1 << 30; private static final Log LOG = LogFactory.getLog(LightWeightHashSet.class); /** * An internal array of entries, which are the rows of the hash table. The * size must be a power of two. */ protected LinkedElement<T>[] entries; /** Size of the entry table. */ private int capacity; /** The size of the set (not the entry array). */ protected int size = 0; /** Hashmask used for determining the bucket index **/ private int hash_mask; /** Capacity at initialization time **/ private final int initialCapacity; /** * Modification version for fail-fast. * * @see ConcurrentModificationException */ protected int modification = 0; private float maxLoadFactor; private float minLoadFactor; private final int expandMultiplier = 2; private int expandThreshold; private int shrinkThreshold; /** * @param initCapacity * Recommended size of the internal array. 
* @param maxLoadFactor * used to determine when to expand the internal array * @param minLoadFactor * used to determine when to shrink the internal array */ @SuppressWarnings("unchecked") public LightWeightHashSet(int initCapacity, float maxLoadFactor, float minLoadFactor) { if (maxLoadFactor <= 0 || maxLoadFactor > 1.0f) throw new IllegalArgumentException("Illegal maxload factor: " + maxLoadFactor); if (minLoadFactor <= 0 || minLoadFactor > maxLoadFactor) throw new IllegalArgumentException("Illegal minload factor: " + minLoadFactor); this.initialCapacity = computeCapacity(initCapacity); this.capacity = this.initialCapacity; this.hash_mask = capacity - 1; this.maxLoadFactor = maxLoadFactor; this.expandThreshold = (int) (capacity * maxLoadFactor); this.minLoadFactor = minLoadFactor; this.shrinkThreshold = (int) (capacity * minLoadFactor); entries = new LinkedElement[capacity]; if (LOG.isDebugEnabled()) { LOG.debug("initial capacity=" + initialCapacity + ", max load factor= " + maxLoadFactor + ", min load factor= " + minLoadFactor); } } public LightWeightHashSet() { this(MINIMUM_CAPACITY, DEFAULT_MAX_LOAD_FACTOR, DEFAUT_MIN_LOAD_FACTOR); } public LightWeightHashSet(int minCapacity) { this(minCapacity, DEFAULT_MAX_LOAD_FACTOR, DEFAUT_MIN_LOAD_FACTOR); } /** * Check if the set is empty. * * @return true is set empty, false otherwise */ @Override public boolean isEmpty() { return size == 0; } /** * Return the current capacity (for testing). */ public int getCapacity() { return capacity; } /** * Return the number of stored elements. */ @Override public int size() { return size; } /** * Get index in the internal table for a given hash. */ protected int getIndex(int hashCode) { return hashCode & hash_mask; } /** * Check if the set contains given element * * @return true if element present, false otherwise. */ @SuppressWarnings("unchecked") @Override public boolean contains(final Object key) { return getElement((T)key) != null; } /** * Return the element in this set which is equal to * the given key, if such an element exists. * Otherwise returns null. */ public T getElement(final T key) { // validate key if (key == null) { throw new IllegalArgumentException("Null element is not supported."); } // find element final int hashCode = key.hashCode(); final int index = getIndex(hashCode); return getContainedElem(index, key, hashCode); } /** * Check if the set contains given element at given index. If it * does, return that element. * * @return the element, or null, if no element matches */ protected T getContainedElem(int index, final T key, int hashCode) { for (LinkedElement<T> e = entries[index]; e != null; e = e.next) { // element found if (hashCode == e.hashCode && e.element.equals(key)) { return e.element; } } // element not found return null; } /** * All all elements in the collection. Expand if necessary. * * @param toAdd - elements to add. * @return true if the set has changed, false otherwise */ @Override public boolean addAll(Collection<? extends T> toAdd) { boolean changed = false; for (T elem : toAdd) { changed |= addElem(elem); } expandIfNecessary(); return changed; } /** * Add given element to the hash table. Expand table if necessary. 
* * @return true if the element was not present in the table, false otherwise */ @Override public boolean add(final T element) { boolean added = addElem(element); expandIfNecessary(); return added; } /** * Add given element to the hash table * * @return true if the element was not present in the table, false otherwise */ protected boolean addElem(final T element) { // validate element if (element == null) { throw new IllegalArgumentException("Null element is not supported."); } // find hashCode & index final int hashCode = element.hashCode(); final int index = getIndex(hashCode); // return false if already present if (getContainedElem(index, element, hashCode) != null) { return false; } modification++; size++; // update bucket linked list LinkedElement<T> le = new LinkedElement<T>(element, hashCode); le.next = entries[index]; entries[index] = le; return true; } /** * Remove the element corresponding to the key. * * @return If such element exists, return true. Otherwise, return false. */ @Override @SuppressWarnings("unchecked") public boolean remove(final Object key) { // validate key if (key == null) { throw new IllegalArgumentException("Null element is not supported."); } LinkedElement<T> removed = removeElem((T) key); shrinkIfNecessary(); return removed == null ? false : true; } /** * Remove the element corresponding to the key, given key.hashCode() == index. * * @return If such element exists, return true. Otherwise, return false. */ protected LinkedElement<T> removeElem(final T key) { LinkedElement<T> found = null; final int hashCode = key.hashCode(); final int index = getIndex(hashCode); if (entries[index] == null) { return null; } else if (hashCode == entries[index].hashCode && entries[index].element.equals(key)) { // remove the head of the bucket linked list modification++; size--; found = entries[index]; entries[index] = found.next; } else { // head != null and key is not equal to head // search the element LinkedElement<T> prev = entries[index]; for (found = prev.next; found != null;) { if (hashCode == found.hashCode && found.element.equals(key)) { // found the element, remove it modification++; size--; prev.next = found.next; found.next = null; break; } else { prev = found; found = found.next; } } } return found; } /** * Remove and return n elements from the hashtable. * The order in which entries are removed is unspecified, and * and may not correspond to the order in which they were inserted. * * @return first element */ public List<T> pollN(int n) { if (n >= size) { return pollAll(); } List<T> retList = new ArrayList<T>(n); if (n == 0) { return retList; } boolean done = false; int currentBucketIndex = 0; while (!done) { LinkedElement<T> current = entries[currentBucketIndex]; while (current != null) { retList.add(current.element); current = current.next; entries[currentBucketIndex] = current; size--; modification++; if (--n == 0) { done = true; break; } } currentBucketIndex++; } shrinkIfNecessary(); return retList; } /** * Remove all elements from the set and return them. Clear the entries. */ public List<T> pollAll() { List<T> retList = new ArrayList<T>(size); for (int i = 0; i < entries.length; i++) { LinkedElement<T> current = entries[i]; while (current != null) { retList.add(current.element); current = current.next; } } this.clear(); return retList; } /** * Get array.length elements from the set, and put them into the array. 
*/ @SuppressWarnings("unchecked") public T[] pollToArray(T[] array) { int currentIndex = 0; LinkedElement<T> current = null; if (array.length == 0) { return array; } if (array.length > size) { array = (T[]) java.lang.reflect.Array.newInstance(array.getClass() .getComponentType(), size); } // do fast polling if the entire set needs to be fetched if (array.length == size) { for (int i = 0; i < entries.length; i++) { current = entries[i]; while (current != null) { array[currentIndex++] = current.element; current = current.next; } } this.clear(); return array; } boolean done = false; int currentBucketIndex = 0; while (!done) { current = entries[currentBucketIndex]; while (current != null) { array[currentIndex++] = current.element; current = current.next; entries[currentBucketIndex] = current; size--; modification++; if (currentIndex == array.length) { done = true; break; } } currentBucketIndex++; } shrinkIfNecessary(); return array; } /** * Compute capacity given initial capacity. * * @return final capacity, either MIN_CAPACITY, MAX_CAPACITY, or power of 2 * closest to the requested capacity. */ private int computeCapacity(int initial) { if (initial < MINIMUM_CAPACITY) { return MINIMUM_CAPACITY; } if (initial > MAXIMUM_CAPACITY) { return MAXIMUM_CAPACITY; } int capacity = 1; while (capacity < initial) { capacity <<= 1; } return capacity; } /** * Resize the internal table to given capacity. */ @SuppressWarnings("unchecked") private void resize(int cap) { int newCapacity = computeCapacity(cap); if (newCapacity == this.capacity) { return; } this.capacity = newCapacity; this.expandThreshold = (int) (capacity * maxLoadFactor); this.shrinkThreshold = (int) (capacity * minLoadFactor); this.hash_mask = capacity - 1; LinkedElement<T>[] temp = entries; entries = new LinkedElement[capacity]; for (int i = 0; i < temp.length; i++) { LinkedElement<T> curr = temp[i]; while (curr != null) { LinkedElement<T> next = curr.next; int index = getIndex(curr.hashCode); curr.next = entries[index]; entries[index] = curr; curr = next; } } } /** * Checks if we need to shrink, and shrinks if necessary. */ protected void shrinkIfNecessary() { if (size < this.shrinkThreshold && capacity > initialCapacity) { resize(capacity / expandMultiplier); } } /** * Checks if we need to expand, and expands if necessary. */ protected void expandIfNecessary() { if (size > this.expandThreshold && capacity < MAXIMUM_CAPACITY) { resize(capacity * expandMultiplier); } } @Override public Iterator<T> iterator() { return new LinkedSetIterator(); } @Override public String toString() { final StringBuilder b = new StringBuilder(getClass().getSimpleName()); b.append("(size=").append(size).append(", modification=") .append(modification).append(", entries.length=") .append(entries.length).append(")"); return b.toString(); } /** Print detailed information of this object. */ public void printDetails(final PrintStream out) { out.print(this + ", entries = ["); for (int i = 0; i < entries.length; i++) { if (entries[i] != null) { LinkedElement<T> e = entries[i]; out.print("\n " + i + ": " + e); for (e = e.next; e != null; e = e.next) { out.print(" -> " + e); } } } out.println("\n]"); } private class LinkedSetIterator implements Iterator<T> { /** The starting modification for fail-fast. */ private final int startModification = modification; /** The current index of the entry array. */ private int index = -1; /** The next element to return. 
*/ private LinkedElement<T> next = nextNonemptyEntry(); private LinkedElement<T> nextNonemptyEntry() { for (index++; index < entries.length && entries[index] == null; index++); return index < entries.length ? entries[index] : null; } @Override public boolean hasNext() { return next != null; } @Override public T next() { if (modification != startModification) { throw new ConcurrentModificationException("modification=" + modification + " != startModification = " + startModification); } if (next == null) { throw new NoSuchElementException(); } final T e = next.element; // find the next element final LinkedElement<T> n = next.next; next = n != null ? n : nextNonemptyEntry(); return e; } @Override public void remove() { throw new UnsupportedOperationException("Remove is not supported."); } } /** * Clear the set. Resize it to the original capacity. */ @Override @SuppressWarnings("unchecked") public void clear() { this.capacity = this.initialCapacity; this.hash_mask = capacity - 1; this.expandThreshold = (int) (capacity * maxLoadFactor); this.shrinkThreshold = (int) (capacity * minLoadFactor); entries = new LinkedElement[capacity]; size = 0; modification++; } @Override public Object[] toArray() { Object[] result = new Object[size]; return toArray(result); } @Override @SuppressWarnings("unchecked") public <U> U[] toArray(U[] a) { if (a == null) { throw new NullPointerException("Input array can not be null"); } if (a.length < size) { a = (U[]) java.lang.reflect.Array.newInstance(a.getClass() .getComponentType(), size); } int currentIndex = 0; for (int i = 0; i < entries.length; i++) { LinkedElement<T> current = entries[i]; while (current != null) { a[currentIndex++] = (U) current.element; current = current.next; } } return a; } @Override public boolean containsAll(Collection<?> c) { Iterator<?> iter = c.iterator(); while (iter.hasNext()) { if (!contains(iter.next())) { return false; } } return true; } @Override public boolean removeAll(Collection<?> c) { boolean changed = false; Iterator<?> iter = c.iterator(); while (iter.hasNext()) { changed |= remove(iter.next()); } return changed; } @Override public boolean retainAll(Collection<?> c) { throw new UnsupportedOperationException("retainAll is not supported."); } }
file_length: 17,619 | avg_line_length: 26.53125 | max_line_length: 80 | extension_type: java
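A minimal usage sketch for the LightWeightHashSet above; the element type and counts are assumptions chosen for illustration.

import java.util.List;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;

public class LightWeightHashSetExample {
  public static void main(String[] args) {
    LightWeightHashSet<Long> blocks = new LightWeightHashSet<>();
    for (long id = 0; id < 100; id++) {
      blocks.add(id);
    }
    System.out.println(blocks.contains(42L)); // true
    // Removes 10 elements; the removal order is unspecified for this class.
    List<Long> batch = blocks.pollN(10);
    System.out.println(batch.size() + " polled, " + blocks.size() + " left"); // 10 polled, 90 left
  }
}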
repo: hadoop
file: hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.security.DigestInputStream; import java.security.MessageDigest; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.util.StringUtils; import com.google.common.base.Charsets; /** * Static functions for dealing with files of the same format * that the Unix "md5sum" utility writes. */ public abstract class MD5FileUtils { private static final Log LOG = LogFactory.getLog( MD5FileUtils.class); public static final String MD5_SUFFIX = ".md5"; private static final Pattern LINE_REGEX = Pattern.compile("([0-9a-f]{32}) [ \\*](.+)"); /** * Verify that the previously saved md5 for the given file matches * expectedMd5. * @throws IOException */ public static void verifySavedMD5(File dataFile, MD5Hash expectedMD5) throws IOException { MD5Hash storedHash = readStoredMd5ForFile(dataFile); // Check the hash itself if (!expectedMD5.equals(storedHash)) { throw new IOException( "File " + dataFile + " did not match stored MD5 checksum " + " (stored: " + storedHash + ", computed: " + expectedMD5); } } /** * Read the md5 file stored alongside the given data file * and match the md5 file content. * @param dataFile the file containing data * @return a matcher with two matched groups * where group(1) is the md5 string and group(2) is the data file path. */ private static Matcher readStoredMd5(File md5File) throws IOException { BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream( md5File), Charsets.UTF_8)); String md5Line; try { md5Line = reader.readLine(); if (md5Line == null) { md5Line = ""; } md5Line = md5Line.trim(); } catch (IOException ioe) { throw new IOException("Error reading md5 file at " + md5File, ioe); } finally { IOUtils.cleanup(LOG, reader); } Matcher matcher = LINE_REGEX.matcher(md5Line); if (!matcher.matches()) { throw new IOException("Invalid MD5 file " + md5File + ": the content \"" + md5Line + "\" does not match the expected pattern."); } return matcher; } /** * Read the md5 checksum stored alongside the given data file. 
* @param dataFile the file containing data * @return the checksum stored in dataFile.md5 */ public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException { final File md5File = getDigestFileForFile(dataFile); if (!md5File.exists()) { return null; } final Matcher matcher = readStoredMd5(md5File); String storedHash = matcher.group(1); File referencedFile = new File(matcher.group(2)); // Sanity check: Make sure that the file referenced in the .md5 file at // least has the same name as the file we expect if (!referencedFile.getName().equals(dataFile.getName())) { throw new IOException( "MD5 file at " + md5File + " references file named " + referencedFile.getName() + " but we expected it to reference " + dataFile); } return new MD5Hash(storedHash); } /** * Read dataFile and compute its MD5 checksum. */ public static MD5Hash computeMd5ForFile(File dataFile) throws IOException { InputStream in = new FileInputStream(dataFile); try { MessageDigest digester = MD5Hash.getDigester(); DigestInputStream dis = new DigestInputStream(in, digester); IOUtils.copyBytes(dis, new IOUtils.NullOutputStream(), 128*1024); return new MD5Hash(digester.digest()); } finally { IOUtils.closeStream(in); } } /** * Save the ".md5" file that lists the md5sum of another file. * @param dataFile the original file whose md5 was computed * @param digest the computed digest * @throws IOException */ public static void saveMD5File(File dataFile, MD5Hash digest) throws IOException { final String digestString = StringUtils.byteToHexString(digest.getDigest()); saveMD5File(dataFile, digestString); } private static void saveMD5File(File dataFile, String digestString) throws IOException { File md5File = getDigestFileForFile(dataFile); String md5Line = digestString + " *" + dataFile.getName() + "\n"; AtomicFileOutputStream afos = new AtomicFileOutputStream(md5File); afos.write(md5Line.getBytes(Charsets.UTF_8)); afos.close(); if (LOG.isDebugEnabled()) { LOG.debug("Saved MD5 " + digestString + " to " + md5File); } } public static void renameMD5File(File oldDataFile, File newDataFile) throws IOException { final File fromFile = getDigestFileForFile(oldDataFile); if (!fromFile.exists()) { throw new FileNotFoundException(fromFile + " does not exist."); } final String digestString = readStoredMd5(fromFile).group(1); saveMD5File(newDataFile, digestString); if (!fromFile.delete()) { LOG.warn("deleting " + fromFile.getAbsolutePath() + " FAILED"); } } /** * @return a reference to the file with .md5 suffix that will * contain the md5 checksum for the given data file. */ public static File getDigestFileForFile(File file) { return new File(file.getParentFile(), file.getName() + MD5_SUFFIX); } }
file_length: 6,502 | avg_line_length: 33.407407 | max_line_length: 80 | extension_type: java
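A sketch of the save/verify round trip offered by the MD5FileUtils helpers above; the file path is hypothetical and assumed to point at an existing data file.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;

public class Md5SidecarExample {
  public static void main(String[] args) throws IOException {
    File data = new File("/tmp/example-data-file");  // hypothetical existing file
    MD5Hash digest = MD5FileUtils.computeMd5ForFile(data);
    MD5FileUtils.saveMD5File(data, digest);          // writes the ".md5" sidecar file
    MD5FileUtils.verifySavedMD5(data, digest);       // throws IOException on mismatch
    System.out.println(MD5FileUtils.readStoredMd5ForFile(data));
  }
}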
repo: hadoop
file: hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/BestEffortLongFile.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.io.Closeable; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.IOUtils; import com.google.common.io.Files; import com.google.common.primitives.Longs; /** * Class that represents a file on disk which stores a single <code>long</code> * value, but does not make any effort to make it truly durable. This is in * contrast to {@link PersistentLongFile} which fsync()s the value on every * change. * * This should be used for values which are updated frequently (such that * performance is important) and not required to be up-to-date for correctness. * * This class also differs in that it stores the value as binary data instead * of a textual string. */ @InterfaceAudience.Private public class BestEffortLongFile implements Closeable { private final File file; private final long defaultVal; private long value; private FileChannel ch = null; private final ByteBuffer buf = ByteBuffer.allocate(Long.SIZE/8); public BestEffortLongFile(File file, long defaultVal) { this.file = file; this.defaultVal = defaultVal; } public long get() throws IOException { lazyOpen(); return value; } public void set(long newVal) throws IOException { lazyOpen(); buf.clear(); buf.putLong(newVal); buf.flip(); IOUtils.writeFully(ch, buf, 0); value = newVal; } private void lazyOpen() throws IOException { if (ch != null) { return; } // Load current value. byte[] data = null; try { data = Files.toByteArray(file); } catch (FileNotFoundException fnfe) { // Expected - this will use default value. } if (data != null && data.length != 0) { if (data.length != Longs.BYTES) { throw new IOException("File " + file + " had invalid length: " + data.length); } value = Longs.fromByteArray(data); } else { value = defaultVal; } // Now open file for future writes. RandomAccessFile raf = new RandomAccessFile(file, "rw"); try { ch = raf.getChannel(); } finally { if (ch == null) { IOUtils.closeStream(raf); } } } @Override public void close() throws IOException { if (ch != null) { ch.close(); ch = null; } } }
file_length: 3,343 | avg_line_length: 27.10084 | max_line_length: 79 | extension_type: java
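A sketch showing the lazy-open get/set cycle of BestEffortLongFile above; the path, default value, and class name are assumptions.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.util.BestEffortLongFile;

public class BestEffortLongFileExample {
  public static void main(String[] args) throws IOException {
    BestEffortLongFile counter =
        new BestEffortLongFile(new File("/tmp/last-seen-value"), 0L); // hypothetical path
    try {
      long current = counter.get(); // returns the default 0 if the file does not exist yet
      counter.set(current + 1);     // best-effort write, no fsync
    } finally {
      counter.close();
    }
  }
}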
repo: hadoop
file: hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; import com.google.common.base.Preconditions; /** * The difference between the current state and a previous state of a list. * * Given a previous state of a set and a sequence of create, delete and modify * operations such that the current state of the set can be obtained by applying * the operations on the previous state, the following algorithm construct the * difference between the current state and the previous state of the set. * * <pre> * Two lists are maintained in the algorithm: * - c-list for newly created elements * - d-list for the deleted elements * * Denote the state of an element by the following * (0, 0): neither in c-list nor d-list * (c, 0): in c-list but not in d-list * (0, d): in d-list but not in c-list * (c, d): in both c-list and d-list * * For each case below, ( , ) at the end shows the result state of the element. * * Case 1. Suppose the element i is NOT in the previous state. (0, 0) * 1.1. create i in current: add it to c-list (c, 0) * 1.1.1. create i in current and then create: impossible * 1.1.2. create i in current and then delete: remove it from c-list (0, 0) * 1.1.3. create i in current and then modify: replace it in c-list (c', 0) * * 1.2. delete i from current: impossible * * 1.3. modify i in current: impossible * * Case 2. Suppose the element i is ALREADY in the previous state. (0, 0) * 2.1. create i in current: impossible * * 2.2. delete i from current: add it to d-list (0, d) * 2.2.1. delete i from current and then create: add it to c-list (c, d) * 2.2.2. delete i from current and then delete: impossible * 2.2.2. delete i from current and then modify: impossible * * 2.3. modify i in current: put it in both c-list and d-list (c, d) * 2.3.1. modify i in current and then create: impossible * 2.3.2. modify i in current and then delete: remove it from c-list (0, d) * 2.3.3. modify i in current and then modify: replace it in c-list (c', d) * </pre> * * @param <K> The key type. * @param <E> The element type, which must implement {@link Element} interface. */ public class Diff<K, E extends Diff.Element<K>> { public static enum ListType { CREATED, DELETED } /** An interface for the elements in a {@link Diff}. */ public static interface Element<K> extends Comparable<K> { /** @return the key of this object. */ public K getKey(); } /** An interface for passing a method in order to process elements. */ public static interface Processor<E> { /** Process the given element. */ public void process(E element); } /** Containing exactly one element. 
*/ public static class Container<E> { private final E element; private Container(E element) { this.element = element; } /** @return the element. */ public E getElement() { return element; } } /** * Undo information for some operations such as delete(E) * and {@link Diff#modify(Element, Element)}. */ public static class UndoInfo<E> { private final int createdInsertionPoint; private final E trashed; private final Integer deletedInsertionPoint; private UndoInfo(final int createdInsertionPoint, final E trashed, final Integer deletedInsertionPoint) { this.createdInsertionPoint = createdInsertionPoint; this.trashed = trashed; this.deletedInsertionPoint = deletedInsertionPoint; } public E getTrashedElement() { return trashed; } } private static final int DEFAULT_ARRAY_INITIAL_CAPACITY = 4; /** * Search the element from the list. * @return -1 if the list is null; otherwise, return the insertion point * defined in {@link Collections#binarySearch(List, Object)}. * Note that, when the list is null, -1 is the correct insertion point. */ protected static <K, E extends Comparable<K>> int search( final List<E> elements, final K name) { return elements == null? -1: Collections.binarySearch(elements, name); } private static <E> void remove(final List<E> elements, final int i, final E expected) { final E removed = elements.remove(-i - 1); Preconditions.checkState(removed == expected, "removed != expected=%s, removed=%s.", expected, removed); } /** c-list: element(s) created in current. */ private List<E> created; /** d-list: element(s) deleted from current. */ private List<E> deleted; protected Diff() {} protected Diff(final List<E> created, final List<E> deleted) { this.created = created; this.deleted = deleted; } /** @return the created list, which is never null. */ public List<E> getList(final ListType type) { final List<E> list = type == ListType.CREATED? created: deleted; return list == null? Collections.<E>emptyList(): list; } public int searchIndex(final ListType type, final K name) { return search(getList(type), name); } /** * @return null if the element is not found; * otherwise, return the element in the created/deleted list. */ public E search(final ListType type, final K name) { final List<E> list = getList(type); final int c = search(list, name); return c < 0 ? null : list.get(c); } /** @return true if no changes contained in the diff */ public boolean isEmpty() { return (created == null || created.isEmpty()) && (deleted == null || deleted.isEmpty()); } /** * Insert the given element to the created/deleted list. * @param i the insertion point defined * in {@link Collections#binarySearch(List, Object)} */ private void insert(final ListType type, final E element, final int i) { List<E> list = type == ListType.CREATED? created: deleted; if (i >= 0) { throw new AssertionError("Element already exists: element=" + element + ", " + type + "=" + list); } if (list == null) { list = new ArrayList<E>(DEFAULT_ARRAY_INITIAL_CAPACITY); if (type == ListType.CREATED) { created = list; } else if (type == ListType.DELETED){ deleted = list; } } list.add(-i - 1, element); } /** * Create an element in current state. * @return the c-list insertion point for undo. */ public int create(final E element) { final int c = search(created, element.getKey()); insert(ListType.CREATED, element, c); return c; } /** * Undo the previous create(E) operation. Note that the behavior is * undefined if the previous operation is not create(E). 
*/ public void undoCreate(final E element, final int insertionPoint) { remove(created, insertionPoint, element); } /** * Delete an element from current state. * @return the undo information. */ public UndoInfo<E> delete(final E element) { final int c = search(created, element.getKey()); E previous = null; Integer d = null; if (c >= 0) { // remove a newly created element previous = created.remove(c); } else { // not in c-list, it must be in previous d = search(deleted, element.getKey()); insert(ListType.DELETED, element, d); } return new UndoInfo<E>(c, previous, d); } /** * Undo the previous delete(E) operation. Note that the behavior is * undefined if the previous operation is not delete(E). */ public void undoDelete(final E element, final UndoInfo<E> undoInfo) { final int c = undoInfo.createdInsertionPoint; if (c >= 0) { created.add(c, undoInfo.trashed); } else { remove(deleted, undoInfo.deletedInsertionPoint, element); } } /** * Modify an element in current state. * @return the undo information. */ public UndoInfo<E> modify(final E oldElement, final E newElement) { Preconditions.checkArgument(oldElement != newElement, "They are the same object: oldElement == newElement = %s", newElement); Preconditions.checkArgument(oldElement.compareTo(newElement.getKey()) == 0, "The names do not match: oldElement=%s, newElement=%s", oldElement, newElement); final int c = search(created, newElement.getKey()); E previous = null; Integer d = null; if (c >= 0) { // Case 1.1.3 and 2.3.3: element is already in c-list, previous = created.set(c, newElement); // For previous != oldElement, set it to oldElement previous = oldElement; } else { d = search(deleted, oldElement.getKey()); if (d < 0) { // Case 2.3: neither in c-list nor d-list insert(ListType.CREATED, newElement, c); insert(ListType.DELETED, oldElement, d); } } return new UndoInfo<E>(c, previous, d); } /** * Undo the previous modify(E, E) operation. Note that the behavior * is undefined if the previous operation is not modify(E, E). */ public void undoModify(final E oldElement, final E newElement, final UndoInfo<E> undoInfo) { final int c = undoInfo.createdInsertionPoint; if (c >= 0) { created.set(c, undoInfo.trashed); } else { final int d = undoInfo.deletedInsertionPoint; if (d < 0) { remove(created, c, newElement); remove(deleted, d, oldElement); } } } /** * Find an element in the previous state. * * @return null if the element cannot be determined in the previous state * since no change is recorded and it should be determined in the * current state; otherwise, return a {@link Container} containing the * element in the previous state. Note that the element can possibly * be null which means that the element is not found in the previous * state. */ public Container<E> accessPrevious(final K name) { return accessPrevious(name, created, deleted); } private static <K, E extends Diff.Element<K>> Container<E> accessPrevious( final K name, final List<E> clist, final List<E> dlist) { final int d = search(dlist, name); if (d >= 0) { // the element was in previous and was once deleted in current. return new Container<E>(dlist.get(d)); } else { final int c = search(clist, name); // When c >= 0, the element in current is a newly created element. return c < 0? null: new Container<E>(null); } } /** * Find an element in the current state. 
* * @return null if the element cannot be determined in the current state since * no change is recorded and it should be determined in the previous * state; otherwise, return a {@link Container} containing the element in * the current state. Note that the element can possibly be null which * means that the element is not found in the current state. */ public Container<E> accessCurrent(K name) { return accessPrevious(name, deleted, created); } /** * Apply this diff to previous state in order to obtain current state. * @return the current state of the list. */ public List<E> apply2Previous(final List<E> previous) { return apply2Previous(previous, getList(ListType.CREATED), getList(ListType.DELETED)); } private static <K, E extends Diff.Element<K>> List<E> apply2Previous( final List<E> previous, final List<E> clist, final List<E> dlist) { // Assumptions: // (A1) All lists are sorted. // (A2) All elements in dlist must be in previous. // (A3) All elements in clist must be not in tmp = previous - dlist. final List<E> tmp = new ArrayList<E>(previous.size() - dlist.size()); { // tmp = previous - dlist final Iterator<E> i = previous.iterator(); for(E deleted : dlist) { E e = i.next(); //since dlist is non-empty, e must exist by (A2). int cmp = 0; for(; (cmp = e.compareTo(deleted.getKey())) < 0; e = i.next()) { tmp.add(e); } Preconditions.checkState(cmp == 0); // check (A2) } for(; i.hasNext(); ) { tmp.add(i.next()); } } final List<E> current = new ArrayList<E>(tmp.size() + clist.size()); { // current = tmp + clist final Iterator<E> tmpIterator = tmp.iterator(); final Iterator<E> cIterator = clist.iterator(); E t = tmpIterator.hasNext()? tmpIterator.next(): null; E c = cIterator.hasNext()? cIterator.next(): null; for(; t != null || c != null; ) { final int cmp = c == null? 1 : t == null? -1 : c.compareTo(t.getKey()); if (cmp < 0) { current.add(c); c = cIterator.hasNext()? cIterator.next(): null; } else if (cmp > 0) { current.add(t); t = tmpIterator.hasNext()? tmpIterator.next(): null; } else { throw new AssertionError("Violated assumption (A3)."); } } } return current; } /** * Apply the reverse of this diff to current state in order * to obtain the previous state. * @return the previous state of the list. */ public List<E> apply2Current(final List<E> current) { return apply2Previous(current, getList(ListType.DELETED), getList(ListType.CREATED)); } /** * Combine this diff with a posterior diff. We have the following cases: * * <pre> * 1. For (c, 0) in the posterior diff, check the element in this diff: * 1.1 (c', 0) in this diff: impossible * 1.2 (0, d') in this diff: put in c-list --> (c, d') * 1.3 (c', d') in this diff: impossible * 1.4 (0, 0) in this diff: put in c-list --> (c, 0) * This is the same logic as create(E). * * 2. For (0, d) in the posterior diff, * 2.1 (c', 0) in this diff: remove from c-list --> (0, 0) * 2.2 (0, d') in this diff: impossible * 2.3 (c', d') in this diff: remove from c-list --> (0, d') * 2.4 (0, 0) in this diff: put in d-list --> (0, d) * This is the same logic as delete(E). * * 3. For (c, d) in the posterior diff, * 3.1 (c', 0) in this diff: replace the element in c-list --> (c, 0) * 3.2 (0, d') in this diff: impossible * 3.3 (c', d') in this diff: replace the element in c-list --> (c, d') * 3.4 (0, 0) in this diff: put in c-list and d-list --> (c, d) * This is the same logic as modify(E, E). * </pre> * * @param posterior The posterior diff to combine with. * @param deletedProcesser * process the deleted/overwritten elements in case 2.1, 2.3, 3.1 and 3.3. 
*/ public void combinePosterior(final Diff<K, E> posterior, final Processor<E> deletedProcesser) { final Iterator<E> createdIterator = posterior.getList(ListType.CREATED).iterator(); final Iterator<E> deletedIterator = posterior.getList(ListType.DELETED).iterator(); E c = createdIterator.hasNext()? createdIterator.next(): null; E d = deletedIterator.hasNext()? deletedIterator.next(): null; for(; c != null || d != null; ) { final int cmp = c == null? 1 : d == null? -1 : c.compareTo(d.getKey()); if (cmp < 0) { // case 1: only in c-list create(c); c = createdIterator.hasNext()? createdIterator.next(): null; } else if (cmp > 0) { // case 2: only in d-list final UndoInfo<E> ui = delete(d); if (deletedProcesser != null) { deletedProcesser.process(ui.trashed); } d = deletedIterator.hasNext()? deletedIterator.next(): null; } else { // case 3: in both c-list and d-list final UndoInfo<E> ui = modify(d, c); if (deletedProcesser != null) { deletedProcesser.process(ui.trashed); } c = createdIterator.hasNext()? createdIterator.next(): null; d = deletedIterator.hasNext()? deletedIterator.next(): null; } } } @Override public String toString() { return getClass().getSimpleName() + "{created=" + getList(ListType.CREATED) + ", deleted=" + getList(ListType.DELETED) + "}"; } }
file_length: 17,029 | avg_line_length: 34.041152 | max_line_length: 87 | extension_type: java
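A sketch of recording and replaying changes with the Diff class above. The Named element type and the NamedDiff subclass are assumptions: Diff's constructors are protected, so this sketch subclasses it just to get an instance.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hdfs.util.Diff;

public class DiffExample {
  // Hypothetical element type; any class implementing Diff.Element<K> would do.
  static class Named implements Diff.Element<String> {
    final String name;
    Named(String name) { this.name = name; }
    @Override public String getKey() { return name; }
    @Override public int compareTo(String other) { return name.compareTo(other); }
    @Override public String toString() { return name; }
  }

  static class NamedDiff extends Diff<String, Named> {
    NamedDiff() { super(); }
  }

  public static void main(String[] args) {
    Named a = new Named("a"), b = new Named("b"), c = new Named("c");
    List<Named> previous = Arrays.asList(a, b); // previous state of the list

    NamedDiff diff = new NamedDiff();
    diff.delete(a); // element removed in the current state
    diff.create(c); // element added in the current state

    // Replay the recorded changes on the previous state: prints [b, c].
    System.out.println(diff.apply2Previous(previous));
  }
}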
repo: hadoop
file: hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.util.ConcurrentModificationException; import java.util.Iterator; import java.util.ArrayList; import java.util.List; import java.util.NoSuchElementException; /** * A low memory linked hash set implementation, which uses an array for storing * the elements and linked lists for collision resolution. In addition it stores * elements in a linked list to ensure ordered traversal. This class does not * support null element. * * This class is not thread safe. * */ public class LightWeightLinkedSet<T> extends LightWeightHashSet<T> { /** * Elements of {@link LightWeightLinkedSet}. */ static class DoubleLinkedElement<T> extends LinkedElement<T> { // references to elements within all-element linked list private DoubleLinkedElement<T> before; private DoubleLinkedElement<T> after; public DoubleLinkedElement(T elem, int hashCode) { super(elem, hashCode); this.before = null; this.after = null; } @Override public String toString() { return super.toString(); } } private DoubleLinkedElement<T> head; private DoubleLinkedElement<T> tail; private LinkedSetIterator bookmark; /** * @param initCapacity * Recommended size of the internal array. * @param maxLoadFactor * used to determine when to expand the internal array * @param minLoadFactor * used to determine when to shrink the internal array */ public LightWeightLinkedSet(int initCapacity, float maxLoadFactor, float minLoadFactor) { super(initCapacity, maxLoadFactor, minLoadFactor); head = null; tail = null; bookmark = new LinkedSetIterator(); } public LightWeightLinkedSet() { this(MINIMUM_CAPACITY, DEFAULT_MAX_LOAD_FACTOR, DEFAUT_MIN_LOAD_FACTOR); } /** * Add given element to the hash table * * @return true if the element was not present in the table, false otherwise */ @Override protected boolean addElem(final T element) { // validate element if (element == null) { throw new IllegalArgumentException("Null element is not supported."); } // find hashCode & index final int hashCode = element.hashCode(); final int index = getIndex(hashCode); // return false if already present if (getContainedElem(index, element, hashCode) != null) { return false; } modification++; size++; // update bucket linked list DoubleLinkedElement<T> le = new DoubleLinkedElement<T>(element, hashCode); le.next = entries[index]; entries[index] = le; // insert to the end of the all-element linked list le.after = null; le.before = tail; if (tail != null) { tail.after = le; } tail = le; if (head == null) { head = le; bookmark.next = head; } // Update bookmark, if necessary. if (bookmark.next == null) { bookmark.next = le; } return true; } /** * Remove the element corresponding to the key, given key.hashCode() == index. * * @return Return the entry with the element if exists. 
Otherwise return null. */ @Override protected DoubleLinkedElement<T> removeElem(final T key) { DoubleLinkedElement<T> found = (DoubleLinkedElement<T>) (super .removeElem(key)); if (found == null) { return null; } // update linked list if (found.after != null) { found.after.before = found.before; } if (found.before != null) { found.before.after = found.after; } if (head == found) { head = head.after; } if (tail == found) { tail = tail.before; } // Update bookmark, if necessary. if (found == this.bookmark.next) { this.bookmark.next = found.after; } return found; } /** * Remove and return first element on the linked list of all elements. * * @return first element */ public T pollFirst() { if (head == null) { return null; } T first = head.element; this.remove(first); return first; } /** * Remove and return n elements from the hashtable. * The order in which entries are removed is corresponds * to the order in which they were inserted. * * @return first element */ @Override public List<T> pollN(int n) { if (n >= size) { // if we need to remove all elements then do fast polling return pollAll(); } List<T> retList = new ArrayList<T>(n); while (n-- > 0 && head != null) { T curr = head.element; this.removeElem(curr); retList.add(curr); } shrinkIfNecessary(); return retList; } /** * Remove all elements from the set and return them in order. Traverse the * link list, don't worry about hashtable - faster version of the parent * method. */ @Override public List<T> pollAll() { List<T> retList = new ArrayList<T>(size); while (head != null) { retList.add(head.element); head = head.after; } this.clear(); return retList; } @Override @SuppressWarnings("unchecked") public <U> U[] toArray(U[] a) { if (a == null) { throw new NullPointerException("Input array can not be null"); } if (a.length < size) { a = (U[]) java.lang.reflect.Array.newInstance(a.getClass() .getComponentType(), size); } int currentIndex = 0; DoubleLinkedElement<T> current = head; while (current != null) { T curr = current.element; a[currentIndex++] = (U) curr; current = current.after; } return a; } @Override public Iterator<T> iterator() { return new LinkedSetIterator(); } private class LinkedSetIterator implements Iterator<T> { /** The starting modification for fail-fast. */ private final int startModification = modification; /** The next element to return. */ private DoubleLinkedElement<T> next = head; @Override public boolean hasNext() { return next != null; } @Override public T next() { if (modification != startModification) { throw new ConcurrentModificationException("modification=" + modification + " != startModification = " + startModification); } if (next == null) { throw new NoSuchElementException(); } final T e = next.element; // find the next element next = next.after; return e; } @Override public void remove() { throw new UnsupportedOperationException("Remove is not supported."); } } /** * Clear the set. Resize it to the original capacity. */ @Override public void clear() { super.clear(); this.head = null; this.tail = null; this.resetBookmark(); } /** * Returns a new iterator starting at the bookmarked element. * * @return the iterator to the bookmarked element. */ public Iterator<T> getBookmark() { LinkedSetIterator toRet = new LinkedSetIterator(); toRet.next = this.bookmark.next; this.bookmark = toRet; return toRet; } /** * Resets the bookmark to the beginning of the list. */ public void resetBookmark() { this.bookmark.next = this.head; } }
8,023
25.746667
80
java
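The record above defines LightWeightLinkedSet, an insertion-ordered hash set with a resumable "bookmark" iterator. Below is a minimal usage sketch; add(...) and remove(...) are inherited from the parent LightWeightHashSet, which is not included in this excerpt, so their availability is assumed from the calls this class itself makes.

import java.util.Iterator;
import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;

public class LinkedSetDemo {
  public static void main(String[] args) {
    LightWeightLinkedSet<String> set = new LightWeightLinkedSet<String>();
    set.add("a");   // add(...) is assumed to come from LightWeightHashSet
    set.add("b");
    set.add("c");

    // Elements come back in insertion order.
    System.out.println(set.pollFirst());   // prints "a"

    // The bookmark points at the next element that has not been polled yet.
    Iterator<String> it = set.getBookmark();
    while (it.hasNext()) {
      System.out.println(it.next());       // prints "b", then "c"
    }
  }
}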
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Canceler.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import org.apache.hadoop.classification.InterfaceAudience; /** * Provides a simple interface where one thread can mark an operation * for cancellation, and another thread can poll for whether the * cancellation has occurred. */ @InterfaceAudience.Private public class Canceler { /** * If the operation has been canceled, set to the reason why * it has been canceled (eg standby moving to active) */ private volatile String cancelReason = null; /** * Requests that the current operation be canceled if it is still running. * This does not block until the cancellation is successful. * @param reason the reason why cancellation is requested */ public void cancel(String reason) { this.cancelReason = reason; } public boolean isCancelled() { return cancelReason != null; } public String getCancellationReason() { return cancelReason; } }
1,743
32.538462
76
java
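Canceler is a one-flag handshake: one thread requests cancellation, another polls for it. A small sketch of that pattern, with a made-up reason string and sleep interval:

import org.apache.hadoop.hdfs.util.Canceler;

public class CancelerDemo {
  public static void main(String[] args) throws InterruptedException {
    final Canceler canceler = new Canceler();

    Thread worker = new Thread(new Runnable() {
      @Override
      public void run() {
        // Poll between units of work; cancelReason is volatile, so the
        // flag becomes visible without extra synchronization.
        while (!canceler.isCancelled()) {
          Thread.yield();   // stand-in for a unit of real work
        }
        System.out.println("stopped: " + canceler.getCancellationReason());
      }
    });
    worker.start();

    Thread.sleep(100);
    canceler.cancel("standby moving to active");   // reason string is arbitrary
    worker.join();
  }
}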
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteArray.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.util.Arrays; import org.apache.hadoop.classification.InterfaceAudience; /** * Wrapper for byte[] to use byte[] as key in HashMap */ @InterfaceAudience.Private public class ByteArray { private int hash = 0; // cache the hash code private final byte[] bytes; public ByteArray(byte[] bytes) { this.bytes = bytes; } public byte[] getBytes() { return bytes; } @Override public int hashCode() { if (hash == 0) { hash = Arrays.hashCode(bytes); } return hash; } @Override public boolean equals(Object o) { if (!(o instanceof ByteArray)) { return false; } return Arrays.equals(bytes, ((ByteArray)o).bytes); } }
1,544
26.589286
75
java
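ByteArray exists because a raw byte[] key uses identity-based hashCode/equals and so never matches an equal-but-distinct array. A short illustration with arbitrary key bytes:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hdfs.util.ByteArray;

public class ByteArrayKeyDemo {
  public static void main(String[] args) {
    Map<ByteArray, String> map = new HashMap<ByteArray, String>();
    map.put(new ByteArray(new byte[] {1, 2, 3}), "value");

    // A different array instance with equal contents still finds the entry,
    // which a plain byte[] key would not.
    System.out.println(map.get(new ByteArray(new byte[] {1, 2, 3})));   // value
  }
}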
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.util.Arrays; import com.google.common.base.Preconditions; /** * Similar to {@link EnumCounters} except that the value type is double. * * @param <E> the enum type */ public class EnumDoubles<E extends Enum<E>> { /** The class of the enum. */ private final Class<E> enumClass; /** An array of doubles corresponding to the enum type. */ private final double[] doubles; /** * Construct doubles for the given enum constants. * @param enumClass the enum class. */ public EnumDoubles(final Class<E> enumClass) { final E[] enumConstants = enumClass.getEnumConstants(); Preconditions.checkNotNull(enumConstants); this.enumClass = enumClass; this.doubles = new double[enumConstants.length]; } /** @return the value corresponding to e. */ public final double get(final E e) { return doubles[e.ordinal()]; } /** Negate all values. */ public final void negation() { for(int i = 0; i < doubles.length; i++) { doubles[i] = -doubles[i]; } } /** Set e to the given value. */ public final void set(final E e, final double value) { doubles[e.ordinal()] = value; } /** Set the values of this object to that object. */ public final void set(final EnumDoubles<E> that) { for(int i = 0; i < doubles.length; i++) { this.doubles[i] = that.doubles[i]; } } /** Reset all values to zero. */ public final void reset() { for(int i = 0; i < doubles.length; i++) { this.doubles[i] = 0.0; } } /** Add the given value to e. */ public final void add(final E e, final double value) { doubles[e.ordinal()] += value; } /** Add the values of that object to this. */ public final void add(final EnumDoubles<E> that) { for(int i = 0; i < doubles.length; i++) { this.doubles[i] += that.doubles[i]; } } /** Subtract the given value from e. */ public final void subtract(final E e, final double value) { doubles[e.ordinal()] -= value; } /** Subtract the values of this object from that object. */ public final void subtract(final EnumDoubles<E> that) { for(int i = 0; i < doubles.length; i++) { this.doubles[i] -= that.doubles[i]; } } @Override public boolean equals(Object obj) { if (obj == this) { return true; } else if (obj == null || !(obj instanceof EnumDoubles)) { return false; } final EnumDoubles<?> that = (EnumDoubles<?>)obj; return this.enumClass == that.enumClass && Arrays.equals(this.doubles, that.doubles); } @Override public int hashCode() { return Arrays.hashCode(doubles); } @Override public String toString() { final E[] enumConstants = enumClass.getEnumConstants(); final StringBuilder b = new StringBuilder(); for(int i = 0; i < doubles.length; i++) { final String name = enumConstants[i].name(); b.append(name).append("=").append(doubles[i]).append(", "); } return b.substring(0, b.length() - 2); } }
3,834
28.728682
75
java
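EnumDoubles keeps one double per enum constant, indexed by ordinal. A small sketch using a throwaway Fruit enum that is not part of HDFS:

import org.apache.hadoop.hdfs.util.EnumDoubles;

public class EnumDoublesDemo {
  enum Fruit { APPLE, ORANGE, GRAPE }   // illustrative enum only

  public static void main(String[] args) {
    EnumDoubles<Fruit> d = new EnumDoubles<Fruit>(Fruit.class);
    d.set(Fruit.APPLE, 1.5);
    d.add(Fruit.APPLE, 2.0);
    d.add(Fruit.GRAPE, 0.25);

    System.out.println(d.get(Fruit.APPLE));   // 3.5
    System.out.println(d);                    // APPLE=3.5, ORANGE=0.0, GRAPE=0.25
  }
}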
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.io.Serializable; /** * Bit format in a long. */ public class LongBitFormat implements Serializable { private static final long serialVersionUID = 1L; private final String NAME; /** Bit offset */ private final int OFFSET; /** Bit length */ private final int LENGTH; /** Minimum value */ private final long MIN; /** Maximum value */ private final long MAX; /** Bit mask */ private final long MASK; public LongBitFormat(String name, LongBitFormat previous, int length, long min) { NAME = name; OFFSET = previous == null? 0: previous.OFFSET + previous.LENGTH; LENGTH = length; MIN = min; MAX = ((-1L) >>> (64 - LENGTH)); MASK = MAX << OFFSET; } /** Retrieve the value from the record. */ public long retrieve(long record) { return (record & MASK) >>> OFFSET; } /** Combine the value to the record. */ public long combine(long value, long record) { if (value < MIN) { throw new IllegalArgumentException( "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN); } if (value > MAX) { throw new IllegalArgumentException( "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX); } return (record & ~MASK) | (value << OFFSET); } public long getMin() { return MIN; } }
2,166
29.097222
83
java
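LongBitFormat chains bit fields inside a single long: each format starts at the offset where the previous one ended. The layout below, a 16-bit MODE field followed by a 24-bit SIZE field, is invented purely for illustration:

import org.apache.hadoop.hdfs.util.LongBitFormat;

public class BitFormatDemo {
  // Hypothetical layout: bits 0-15 hold MODE, bits 16-39 hold SIZE.
  static final LongBitFormat MODE = new LongBitFormat("MODE", null, 16, 0);
  static final LongBitFormat SIZE = new LongBitFormat("SIZE", MODE, 24, 0);

  public static void main(String[] args) {
    long record = 0L;
    record = MODE.combine(0755, record);   // octal 0755 == 493
    record = SIZE.combine(1024, record);

    System.out.println(MODE.retrieve(record));   // 493
    System.out.println(SIZE.retrieve(record));   // 1024
  }
}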
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.xml.sax.ContentHandler; import org.xml.sax.SAXException; import org.xml.sax.helpers.AttributesImpl; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.TreeMap; /** * General xml utilities. * */ @InterfaceAudience.Private @InterfaceStability.Unstable public class XMLUtils { /** * Exception that reflects an invalid XML document. */ static public class InvalidXmlException extends RuntimeException { private static final long serialVersionUID = 1L; public InvalidXmlException(String s) { super(s); } } /** * Exception that reflects a string that cannot be unmangled. */ public static class UnmanglingError extends RuntimeException { private static final long serialVersionUID = 1L; public UnmanglingError(String str, Exception e) { super(str, e); } public UnmanglingError(String str) { super(str); } } /** * Given a code point, determine if it should be mangled before being * represented in an XML document. * * Any code point that isn't valid in XML must be mangled. * See http://en.wikipedia.org/wiki/Valid_characters_in_XML for a * quick reference, or the w3 standard for the authoritative reference. * * @param cp The code point * @return True if the code point should be mangled */ private static boolean codePointMustBeMangled(int cp) { if (cp < 0x20) { return ((cp != 0x9) && (cp != 0xa) && (cp != 0xd)); } else if ((0xd7ff < cp) && (cp < 0xe000)) { return true; } else if ((cp == 0xfffe) || (cp == 0xffff)) { return true; } else if (cp == 0x5c) { // we mangle backslash to simplify decoding... it's // easier if backslashes always begin mangled sequences. return true; } return false; } private static final int NUM_SLASH_POSITIONS = 4; private static String mangleCodePoint(int cp) { return String.format("\\%0" + NUM_SLASH_POSITIONS + "x;", cp); } private static String codePointToEntityRef(int cp) { switch (cp) { case '&': return "&amp;"; case '\"': return "&quot;"; case '\'': return "&apos;"; case '<': return "&lt;"; case '>': return "&gt;"; default: return null; } } /** * Mangle a string so that it can be represented in an XML document. * * There are three kinds of code points in XML: * - Those that can be represented normally, * - Those that have to be escaped (for example, & must be represented * as &amp;) * - Those that cannot be represented at all in XML. * * The built-in SAX functions will handle the first two types for us just * fine. However, sometimes we come across a code point of the third type. * In this case, we have to mangle the string in order to represent it at * all. 
We also mangle backslash to avoid confusing a backslash in the * string with part our escape sequence. * * The encoding used here is as follows: an illegal code point is * represented as '\ABCD;', where ABCD is the hexadecimal value of * the code point. * * @param str The input string. * * @return The mangled string. */ public static String mangleXmlString(String str, boolean createEntityRefs) { final StringBuilder bld = new StringBuilder(); final int length = str.length(); for (int offset = 0; offset < length; ) { final int cp = str.codePointAt(offset); final int len = Character.charCount(cp); if (codePointMustBeMangled(cp)) { bld.append(mangleCodePoint(cp)); } else { String entityRef = null; if (createEntityRefs) { entityRef = codePointToEntityRef(cp); } if (entityRef != null) { bld.append(entityRef); } else { for (int i = 0; i < len; i++) { bld.append(str.charAt(offset + i)); } } } offset += len; } return bld.toString(); } /** * Demangle a string from an XML document. * See {@link #mangleXmlString(String, boolean)} for a description of the * mangling format. * * @param str The string to be demangled. * * @return The unmangled string * @throws UnmanglingError if the input is malformed. */ public static String unmangleXmlString(String str, boolean decodeEntityRefs) throws UnmanglingError { int slashPosition = -1; String escapedCp = ""; StringBuilder bld = new StringBuilder(); StringBuilder entityRef = null; for (int i = 0; i < str.length(); i++) { char ch = str.charAt(i); if (entityRef != null) { entityRef.append(ch); if (ch == ';') { String e = entityRef.toString(); if (e.equals("&quot;")) { bld.append("\""); } else if (e.equals("&apos;")) { bld.append("\'"); } else if (e.equals("&amp;")) { bld.append("&"); } else if (e.equals("&lt;")) { bld.append("<"); } else if (e.equals("&gt;")) { bld.append(">"); } else { throw new UnmanglingError("Unknown entity ref " + e); } entityRef = null; } } else if ((slashPosition >= 0) && (slashPosition < NUM_SLASH_POSITIONS)) { escapedCp += ch; ++slashPosition; } else if (slashPosition == NUM_SLASH_POSITIONS) { if (ch != ';') { throw new UnmanglingError("unterminated code point escape: " + "expected semicolon at end."); } try { bld.appendCodePoint(Integer.parseInt(escapedCp, 16)); } catch (NumberFormatException e) { throw new UnmanglingError("error parsing unmangling escape code", e); } escapedCp = ""; slashPosition = -1; } else if (ch == '\\') { slashPosition = 0; } else { boolean startingEntityRef = false; if (decodeEntityRefs) { startingEntityRef = (ch == '&'); } if (startingEntityRef) { entityRef = new StringBuilder(); entityRef.append("&"); } else { bld.append(ch); } } } if (entityRef != null) { throw new UnmanglingError("unterminated entity ref starting with " + entityRef.toString()); } else if (slashPosition != -1) { throw new UnmanglingError("unterminated code point escape: string " + "broke off in the middle"); } return bld.toString(); } /** * Add a SAX tag with a string inside. * * @param contentHandler the SAX content handler * @param tag the element tag to use * @param val the string to put inside the tag */ public static void addSaxString(ContentHandler contentHandler, String tag, String val) throws SAXException { contentHandler.startElement("", "", tag, new AttributesImpl()); char c[] = mangleXmlString(val, false).toCharArray(); contentHandler.characters(c, 0, c.length); contentHandler.endElement("", "", tag); } /** * Represents a bag of key-value pairs encountered during parsing an XML * file. 
*/ static public class Stanza { private final TreeMap<String, LinkedList <Stanza > > subtrees; /** The unmangled value of this stanza. */ private String value; public Stanza() { subtrees = new TreeMap<String, LinkedList <Stanza > >(); value = ""; } public void setValue(String value) { this.value = value; } public String getValue() { return this.value; } /** * Discover if a stanza has a given entry. * * @param name entry to look for * * @return true if the entry was found */ public boolean hasChildren(String name) { return subtrees.containsKey(name); } /** * Pull an entry from a stanza. * * @param name entry to look for * * @return the entry */ public List<Stanza> getChildren(String name) throws InvalidXmlException { LinkedList <Stanza> children = subtrees.get(name); if (children == null) { throw new InvalidXmlException("no entry found for " + name); } return children; } /** * Pull a string entry from a stanza. * * @param name entry to look for * * @return the entry */ public String getValue(String name) throws InvalidXmlException { String ret = getValueOrNull(name); if (ret == null) { throw new InvalidXmlException("no entry found for " + name); } return ret; } /** * Pull a string entry from a stanza, or null. * * @param name entry to look for * * @return the entry, or null if it was not found. */ public String getValueOrNull(String name) throws InvalidXmlException { if (!subtrees.containsKey(name)) { return null; } LinkedList <Stanza> l = subtrees.get(name); if (l.size() != 1) { throw new InvalidXmlException("More than one value found for " + name); } return l.get(0).getValue(); } /** * Add an entry to a stanza. * * @param name name of the entry to add * @param child the entry to add */ public void addChild(String name, Stanza child) { LinkedList<Stanza> l; if (subtrees.containsKey(name)) { l = subtrees.get(name); } else { l = new LinkedList<Stanza>(); subtrees.put(name, l); } l.add(child); } /** * Convert a stanza to a human-readable string. */ @Override public String toString() { StringBuilder bld = new StringBuilder(); bld.append("{"); if (!value.equals("")) { bld.append("\"").append(value).append("\""); } String prefix = ""; for (Map.Entry<String, LinkedList <Stanza > > entry : subtrees.entrySet()) { String key = entry.getKey(); LinkedList <Stanza > ll = entry.getValue(); for (Stanza child : ll) { bld.append(prefix); bld.append("<").append(key).append(">"); bld.append(child.toString()); prefix = ", "; } } bld.append("}"); return bld.toString(); } } }
11,546
29.386842
82
java
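The mangling scheme above turns XML-illegal code points (and backslash) into \XXXX; escapes while leaving ordinary characters to the entity-reference path. A round-trip sketch; the input string is arbitrary and the printed escapes follow from the %04x format used by mangleCodePoint:

import org.apache.hadoop.hdfs.util.XMLUtils;

public class XmlMangleDemo {
  public static void main(String[] args) {
    // U+000B (vertical tab) is not legal in XML, and backslash starts escapes.
    String raw = "bad\u000bvalue\\path";

    String mangled = XMLUtils.mangleXmlString(raw, true);
    System.out.println(mangled);                            // bad\000b;value\005c;path
    System.out.println(XMLUtils.unmangleXmlString(mangled, true).equals(raw));   // true
  }
}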
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.util.Arrays; import java.util.HashMap; import com.google.common.base.Preconditions; import org.apache.commons.lang.ArrayUtils; /** * Counters for an enum type. * * For example, suppose there is an enum type * <pre> * enum Fruit { APPLE, ORANGE, GRAPE } * </pre> * An {@link EnumCounters} object can be created for counting the numbers of * APPLE, ORANGLE and GRAPE. * * @param <E> the enum type */ public class EnumCounters<E extends Enum<E>> { /** The class of the enum. */ private final Class<E> enumClass; /** An array of longs corresponding to the enum type. */ private final long[] counters; /** * Construct counters for the given enum constants. * @param enumClass the enum class of the counters. */ public EnumCounters(final Class<E> enumClass) { final E[] enumConstants = enumClass.getEnumConstants(); Preconditions.checkNotNull(enumConstants); this.enumClass = enumClass; this.counters = new long[enumConstants.length]; } public EnumCounters(final Class<E> enumClass, long defaultVal) { final E[] enumConstants = enumClass.getEnumConstants(); Preconditions.checkNotNull(enumConstants); this.enumClass = enumClass; this.counters = new long[enumConstants.length]; reset(defaultVal); } /** @return the value of counter e. */ public final long get(final E e) { return counters[e.ordinal()]; } /** @return the values of counter as a shadow copy of array*/ public long[] asArray() { return ArrayUtils.clone(counters); } /** Negate all counters. */ public final void negation() { for(int i = 0; i < counters.length; i++) { counters[i] = -counters[i]; } } /** Set counter e to the given value. */ public final void set(final E e, final long value) { counters[e.ordinal()] = value; } /** Set this counters to that counters. */ public final void set(final EnumCounters<E> that) { for(int i = 0; i < counters.length; i++) { this.counters[i] = that.counters[i]; } } /** Reset all counters to zero. */ public final void reset() { reset(0L); } /** Add the given value to counter e. */ public final void add(final E e, final long value) { counters[e.ordinal()] += value; } /** Add that counters to this counters. */ public final void add(final EnumCounters<E> that) { for(int i = 0; i < counters.length; i++) { this.counters[i] += that.counters[i]; } } /** Subtract the given value from counter e. */ public final void subtract(final E e, final long value) { counters[e.ordinal()] -= value; } /** Subtract this counters from that counters. */ public final void subtract(final EnumCounters<E> that) { for(int i = 0; i < counters.length; i++) { this.counters[i] -= that.counters[i]; } } /** @return the sum of all counters. 
*/ public final long sum() { long sum = 0; for(int i = 0; i < counters.length; i++) { sum += counters[i]; } return sum; } @Override public boolean equals(Object obj) { if (obj == this) { return true; } else if (obj == null || !(obj instanceof EnumCounters)) { return false; } final EnumCounters<?> that = (EnumCounters<?>)obj; return this.enumClass == that.enumClass && Arrays.equals(this.counters, that.counters); } @Override public int hashCode() { return Arrays.hashCode(counters); } @Override public String toString() { final E[] enumConstants = enumClass.getEnumConstants(); final StringBuilder b = new StringBuilder(); for(int i = 0; i < counters.length; i++) { final String name = enumConstants[i].name(); b.append(name).append("=").append(counters[i]).append(", "); } return b.substring(0, b.length() - 2); } public final void reset(long val) { for(int i = 0; i < counters.length; i++) { this.counters[i] = val; } } public boolean allLessOrEqual(long val) { for (long c : counters) { if (c > val) { return false; } } return true; } public boolean anyGreaterOrEqual(long val) { for (long c: counters) { if (c >= val) { return true; } } return false; } /** * A factory for creating counters. * * @param <E> the enum type * @param <C> the counter type */ public static interface Factory<E extends Enum<E>, C extends EnumCounters<E>> { /** Create a new counters instance. */ public C newInstance(); } /** * A key-value map which maps the keys to {@link EnumCounters}. * Note that null key is supported. * * @param <K> the key type * @param <E> the enum type * @param <C> the counter type */ public static class Map<K, E extends Enum<E>, C extends EnumCounters<E>> { /** The factory for creating counters. */ private final Factory<E, C> factory; /** Key-to-Counts map. */ private final java.util.Map<K, C> counts = new HashMap<K, C>(); /** Construct a map. */ public Map(final Factory<E, C> factory) { this.factory = factory; } /** @return the counters for the given key. */ public final C getCounts(final K key) { C c = counts.get(key); if (c == null) { c = factory.newInstance(); counts.put(key, c); } return c; } /** @return the sum of the values of all the counters. */ public final C sum() { final C sum = factory.newInstance(); for(C c : counts.values()) { sum.add(c); } return sum; } /** @return the sum of the values of all the counters for e. */ public final long sum(final E e) { long sum = 0; for(C c : counts.values()) { sum += c.get(e); } return sum; } @Override public String toString() { return counts.toString(); } } }
6,769
26.408907
76
java
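EnumCounters mirrors EnumDoubles but stores long counters and adds aggregate helpers such as sum() and allLessOrEqual(). A brief sketch reusing the Fruit enum from the class javadoc:

import org.apache.hadoop.hdfs.util.EnumCounters;

public class EnumCountersDemo {
  enum Fruit { APPLE, ORANGE, GRAPE }   // sample enum from the javadoc

  public static void main(String[] args) {
    EnumCounters<Fruit> counters = new EnumCounters<Fruit>(Fruit.class);
    counters.add(Fruit.APPLE, 3);
    counters.add(Fruit.GRAPE, 2);
    counters.subtract(Fruit.GRAPE, 1);

    System.out.println(counters.get(Fruit.APPLE));    // 3
    System.out.println(counters.sum());               // 4
    System.out.println(counters.allLessOrEqual(3));   // true
  }
}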
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.IOUtils; import com.google.common.base.Charsets; /** * Class that represents a file on disk which persistently stores * a single <code>long</code> value. The file is updated atomically * and durably (i.e fsynced). */ @InterfaceAudience.Private public class PersistentLongFile { private static final Log LOG = LogFactory.getLog( PersistentLongFile.class); private final File file; private final long defaultVal; private long value; private boolean loaded = false; public PersistentLongFile(File file, long defaultVal) { this.file = file; this.defaultVal = defaultVal; } public long get() throws IOException { if (!loaded) { value = readFile(file, defaultVal); loaded = true; } return value; } public void set(long newVal) throws IOException { if (value != newVal || !loaded) { writeFile(file, newVal); } value = newVal; loaded = true; } /** * Atomically write the given value to the given file, including fsyncing. * * @param file destination file * @param val value to write * @throws IOException if the file cannot be written */ public static void writeFile(File file, long val) throws IOException { AtomicFileOutputStream fos = new AtomicFileOutputStream(file); try { fos.write(String.valueOf(val).getBytes(Charsets.UTF_8)); fos.write('\n'); fos.close(); fos = null; } finally { if (fos != null) { fos.abort(); } } } public static long readFile(File file, long defaultVal) throws IOException { long val = defaultVal; if (file.exists()) { BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream( file), Charsets.UTF_8)); try { val = Long.parseLong(br.readLine()); br.close(); br = null; } finally { IOUtils.cleanup(LOG, br); } } return val; } }
3,132
28.009259
78
java
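PersistentLongFile reads the stored value lazily on the first get() and rewrites the file only when the value changes. A sketch assuming /tmp/seen_txid is a writable scratch path (the file name is hypothetical):

import java.io.File;
import org.apache.hadoop.hdfs.util.PersistentLongFile;

public class PersistentLongFileDemo {
  public static void main(String[] args) throws Exception {
    File f = new File("/tmp/seen_txid");   // hypothetical location
    PersistentLongFile plf = new PersistentLongFile(f, 0L);

    System.out.println(plf.get());   // defaultVal (0) if the file does not exist yet
    plf.set(42L);                    // written atomically and fsynced
    System.out.println(plf.get());   // 42

    // The static helpers can also be called directly.
    System.out.println(PersistentLongFile.readFile(f, 0L));   // 42
  }
}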
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Holder.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; /** * A Holder is simply a wrapper around some other object. This is useful * in particular for storing immutable values like boxed Integers in a * collection without having to do the &quot;lookup&quot; of the value twice. */ public class Holder<T> { public T held; public Holder(T held) { this.held = held; } @Override public String toString() { return String.valueOf(held); } }
1,255
32.945946
77
java
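Holder is just a mutable cell; its typical use is updating a boxed value kept in a collection without looking it up twice. A tiny sketch:

import org.apache.hadoop.hdfs.util.Holder;

public class HolderDemo {
  public static void main(String[] args) {
    Holder<Integer> count = new Holder<Integer>(0);
    count.held = count.held + 1;   // mutate the wrapped value in place
    System.out.println(count);     // 1
  }
}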
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AtomicFileOutputStream.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.FilterOutputStream; import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.io.nativeio.NativeIOException; /** * A FileOutputStream that has the property that it will only show * up at its destination once it has been entirely written and flushed * to disk. While being written, it will use a .tmp suffix. * * When the output stream is closed, it is flushed, fsynced, and * will be moved into place, overwriting any file that already * exists at that location. * * <b>NOTE</b>: on Windows platforms, it will not atomically * replace the target file - instead the target file is deleted * before this one is moved into place. */ public class AtomicFileOutputStream extends FilterOutputStream { private static final String TMP_EXTENSION = ".tmp"; private final static Log LOG = LogFactory.getLog( AtomicFileOutputStream.class); private final File origFile; private final File tmpFile; public AtomicFileOutputStream(File f) throws FileNotFoundException { // Code unfortunately must be duplicated below since we can't assign anything // before calling super super(new FileOutputStream(new File(f.getParentFile(), f.getName() + TMP_EXTENSION))); origFile = f.getAbsoluteFile(); tmpFile = new File(f.getParentFile(), f.getName() + TMP_EXTENSION).getAbsoluteFile(); } @Override public void close() throws IOException { boolean triedToClose = false, success = false; try { flush(); ((FileOutputStream)out).getChannel().force(true); triedToClose = true; super.close(); success = true; } finally { if (success) { boolean renamed = tmpFile.renameTo(origFile); if (!renamed) { // On windows, renameTo does not replace. if (origFile.exists() && !origFile.delete()) { throw new IOException("Could not delete original file " + origFile); } try { NativeIO.renameTo(tmpFile, origFile); } catch (NativeIOException e) { throw new IOException("Could not rename temporary file " + tmpFile + " to " + origFile + " due to failure in native rename. " + e.toString()); } } } else { if (!triedToClose) { // If we failed when flushing, try to close it to not leak an FD IOUtils.closeStream(out); } // close wasn't successful, try to delete the tmp file if (!tmpFile.delete()) { LOG.warn("Unable to delete tmp file " + tmpFile); } } } } /** * Close the atomic file, but do not "commit" the temporary file * on top of the destination. This should be used if there is a failure * in writing. 
*/ public void abort() { try { super.close(); } catch (IOException ioe) { LOG.warn("Unable to abort file " + tmpFile, ioe); } if (!tmpFile.delete()) { LOG.warn("Unable to delete tmp file during abort " + tmpFile); } } }
4,112
33.563025
90
java
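The intended pattern for AtomicFileOutputStream, mirrored by PersistentLongFile.writeFile above, is close() on success and abort() on failure so that a half-written file never replaces the destination. A sketch with a hypothetical destination path:

import java.io.File;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;

public class AtomicWriteDemo {
  public static void main(String[] args) throws Exception {
    File dest = new File("/tmp/config.bin");   // hypothetical destination
    AtomicFileOutputStream out = new AtomicFileOutputStream(dest);
    boolean success = false;
    try {
      out.write("payload".getBytes(StandardCharsets.UTF_8));
      out.close();     // flush + fsync + rename .tmp into place
      success = true;
    } finally {
      if (!success) {
        out.abort();   // discard the .tmp file, leave any old dest untouched
      }
    }
  }
}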
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteBufferOutputStream.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.io.IOException; import java.io.OutputStream; import java.nio.ByteBuffer; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * OutputStream that writes into a {@link ByteBuffer}. */ @InterfaceAudience.Private @InterfaceStability.Stable public class ByteBufferOutputStream extends OutputStream { private final ByteBuffer buf; public ByteBufferOutputStream(ByteBuffer buf) { this.buf = buf; } @Override public void write(int b) throws IOException { buf.put((byte)b); } @Override public void write(byte[] b, int off, int len) throws IOException { buf.put(b, off, len); } }
1,535
30.346939
75
java
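ByteBufferOutputStream simply forwards writes into the supplied ByteBuffer, so it composes naturally with wrappers such as DataOutputStream; the caller must size the buffer, since put() throws BufferOverflowException rather than growing. For example:

import java.io.DataOutputStream;
import java.nio.ByteBuffer;
import org.apache.hadoop.hdfs.util.ByteBufferOutputStream;

public class ByteBufferOutDemo {
  public static void main(String[] args) throws Exception {
    ByteBuffer buf = ByteBuffer.allocate(64);   // must be large enough up front
    DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buf));
    out.writeInt(7);
    out.writeUTF("hello");
    out.flush();

    buf.flip();
    System.out.println(buf.remaining());   // 11 bytes: 4 (int) + 2 (length) + 5 (chars)
  }
}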
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ReferenceCountMap.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.util; import java.util.HashMap; import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; /** * Class for de-duplication of instances. <br> * Hold the references count to a single instance. If there are no references * then the entry will be removed.<br> * Type E should implement {@link ReferenceCounter}<br> * Note: This class is NOT thread-safe. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class ReferenceCountMap<E extends ReferenceCountMap.ReferenceCounter> { private Map<E, E> referenceMap = new HashMap<E, E>(); /** * Add the reference. If the instance already present, just increase the * reference count. * * @param key Key to put in reference map * @return Referenced instance */ public E put(E key) { E value = referenceMap.get(key); if (value == null) { value = key; referenceMap.put(key, value); } value.incrementAndGetRefCount(); return value; } /** * Delete the reference. Decrease the reference count for the instance, if * any. On all references removal delete the instance from the map. * * @param key Key to remove the reference. */ public void remove(E key) { E value = referenceMap.get(key); if (value != null && value.decrementAndGetRefCount() == 0) { referenceMap.remove(key); } } /** * Get entries in the reference Map. * * @return */ @VisibleForTesting public ImmutableList<E> getEntries() { return new ImmutableList.Builder<E>().addAll(referenceMap.keySet()).build(); } /** * Get the reference count for the key */ public long getReferenceCount(E key) { ReferenceCounter counter = referenceMap.get(key); if (counter != null) { return counter.getRefCount(); } return 0; } /** * Get the number of unique elements */ public int getUniqueElementsSize() { return referenceMap.size(); } /** * Clear the contents */ @VisibleForTesting public void clear() { referenceMap.clear(); } /** * Interface for the reference count holder */ public static interface ReferenceCounter { public int getRefCount(); public int incrementAndGetRefCount(); public int decrementAndGetRefCount(); } }
3,282
26.588235
80
java
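ReferenceCountMap canonicalizes equal instances and counts how many references each one has, dropping the entry when the count reaches zero. The Name element below is a made-up stand-in for the real HDFS element types; the only requirements are the ReferenceCounter methods plus value-based equals/hashCode:

import org.apache.hadoop.hdfs.util.ReferenceCountMap;
import org.apache.hadoop.hdfs.util.ReferenceCountMap.ReferenceCounter;

public class RefCountDemo {
  static class Name implements ReferenceCounter {
    private final String value;
    private int refCount = 0;
    Name(String value) { this.value = value; }

    @Override public int getRefCount() { return refCount; }
    @Override public int incrementAndGetRefCount() { return ++refCount; }
    @Override public int decrementAndGetRefCount() { return --refCount; }

    // Value-based equality so duplicates collapse onto one canonical instance.
    @Override public boolean equals(Object o) {
      return o instanceof Name && value.equals(((Name) o).value);
    }
    @Override public int hashCode() { return value.hashCode(); }
  }

  public static void main(String[] args) {
    ReferenceCountMap<Name> map = new ReferenceCountMap<Name>();
    Name first = map.put(new Name("owner"));
    Name second = map.put(new Name("owner"));   // returns the same canonical instance

    System.out.println(first == second);                // true
    System.out.println(map.getReferenceCount(first));   // 2
    map.remove(first);
    map.remove(first);                                  // count hits 0, entry removed
    System.out.println(map.getUniqueElementsSize());    // 0
  }
}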
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import static com.google.common.base.Preconditions.checkNotNull; import java.util.Date; import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.util.IntrusiveCollection; import org.apache.hadoop.util.IntrusiveCollection.Element; import com.google.common.base.Preconditions; /** * Namenode class that tracks state related to a cached path. * * This is an implementation class, not part of the public API. */ @InterfaceAudience.Private public final class CacheDirective implements IntrusiveCollection.Element { private final long id; private final String path; private final short replication; private CachePool pool; private final long expiryTime; private long bytesNeeded; private long bytesCached; private long filesNeeded; private long filesCached; private Element prev; private Element next; public CacheDirective(CacheDirectiveInfo info) { this( info.getId(), info.getPath().toUri().getPath(), info.getReplication(), info.getExpiration().getAbsoluteMillis()); } public CacheDirective(long id, String path, short replication, long expiryTime) { Preconditions.checkArgument(id > 0); this.id = id; this.path = checkNotNull(path); Preconditions.checkArgument(replication > 0); this.replication = replication; this.expiryTime = expiryTime; } public long getId() { return id; } public String getPath() { return path; } public short getReplication() { return replication; } public CachePool getPool() { return pool; } /** * @return When this directive expires, in milliseconds since Unix epoch */ public long getExpiryTime() { return expiryTime; } /** * @return When this directive expires, as an ISO-8601 formatted string. */ public String getExpiryTimeString() { return DFSUtil.dateToIso8601String(new Date(expiryTime)); } /** * Returns a {@link CacheDirectiveInfo} based on this CacheDirective. * <p> * This always sets an absolute expiry time, never a relative TTL. */ public CacheDirectiveInfo toInfo() { return new CacheDirectiveInfo.Builder(). setId(id). setPath(new Path(path)). setReplication(replication). setPool(pool.getPoolName()). setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiryTime)). build(); } public CacheDirectiveStats toStats() { return new CacheDirectiveStats.Builder(). setBytesNeeded(bytesNeeded). setBytesCached(bytesCached). setFilesNeeded(filesNeeded). setFilesCached(filesCached). setHasExpired(new Date().getTime() > expiryTime). 
build(); } public CacheDirectiveEntry toEntry() { return new CacheDirectiveEntry(toInfo(), toStats()); } @Override public String toString() { StringBuilder builder = new StringBuilder(); builder.append("{ id:").append(id). append(", path:").append(path). append(", replication:").append(replication). append(", pool:").append(pool). append(", expiryTime: ").append(getExpiryTimeString()). append(", bytesNeeded:").append(bytesNeeded). append(", bytesCached:").append(bytesCached). append(", filesNeeded:").append(filesNeeded). append(", filesCached:").append(filesCached). append(" }"); return builder.toString(); } @Override public boolean equals(Object o) { if (o == null) { return false; } if (o == this) { return true; } if (o.getClass() != this.getClass()) { return false; } CacheDirective other = (CacheDirective)o; return id == other.id; } @Override public int hashCode() { return new HashCodeBuilder().append(id).toHashCode(); } // // Stats related getters and setters // /** * Resets the byte and file statistics being tracked by this CacheDirective. */ public void resetStatistics() { bytesNeeded = 0; bytesCached = 0; filesNeeded = 0; filesCached = 0; } public long getBytesNeeded() { return bytesNeeded; } public void addBytesNeeded(long bytes) { this.bytesNeeded += bytes; pool.addBytesNeeded(bytes); } public long getBytesCached() { return bytesCached; } public void addBytesCached(long bytes) { this.bytesCached += bytes; pool.addBytesCached(bytes); } public long getFilesNeeded() { return filesNeeded; } public void addFilesNeeded(long files) { this.filesNeeded += files; pool.addFilesNeeded(files); } public long getFilesCached() { return filesCached; } public void addFilesCached(long files) { this.filesCached += files; pool.addFilesCached(files); } // // IntrusiveCollection.Element implementation // @SuppressWarnings("unchecked") @Override // IntrusiveCollection.Element public void insertInternal(IntrusiveCollection<? extends Element> list, Element prev, Element next) { assert this.pool == null; this.pool = ((CachePool.DirectiveList)list).getCachePool(); this.prev = prev; this.next = next; } @Override // IntrusiveCollection.Element public void setPrev(IntrusiveCollection<? extends Element> list, Element prev) { assert list == pool.getDirectiveList(); this.prev = prev; } @Override // IntrusiveCollection.Element public void setNext(IntrusiveCollection<? extends Element> list, Element next) { assert list == pool.getDirectiveList(); this.next = next; } @Override // IntrusiveCollection.Element public void removeInternal(IntrusiveCollection<? extends Element> list) { assert list == pool.getDirectiveList(); this.pool = null; this.prev = null; this.next = null; } @Override // IntrusiveCollection.Element public Element getPrev(IntrusiveCollection<? extends Element> list) { if (list != pool.getDirectiveList()) { return null; } return this.prev; } @Override // IntrusiveCollection.Element public Element getNext(IntrusiveCollection<? extends Element> list) { if (list != pool.getDirectiveList()) { return null; } return this.next; } @Override // IntrusiveCollection.Element public boolean isInList(IntrusiveCollection<? extends Element> list) { return pool == null ? false : list == pool.getDirectiveList(); } };
7,391
26.479554
82
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsBlocksMetadata.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; /** * Augments an array of blocks on a datanode with additional information about * where the block is stored. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class HdfsBlocksMetadata { /** The block pool that was queried */ private final String blockPoolId; /** * List of blocks */ private final long[] blockIds; /** * List of volumes */ private final List<byte[]> volumeIds; /** * List of indexes into <code>volumeIds</code>, one per block in * <code>blocks</code>. A value of Integer.MAX_VALUE indicates that the * block was not found. */ private final List<Integer> volumeIndexes; /** * Constructs HdfsBlocksMetadata. * * @param blockIds * List of blocks described * @param volumeIds * List of potential volume identifiers, specifying volumes where * blocks may be stored * @param volumeIndexes * Indexes into the list of volume identifiers, one per block */ public HdfsBlocksMetadata(String blockPoolId, long[] blockIds, List<byte[]> volumeIds, List<Integer> volumeIndexes) { Preconditions.checkArgument(blockIds.length == volumeIndexes.size(), "Argument lengths should match"); this.blockPoolId = blockPoolId; this.blockIds = blockIds; this.volumeIds = volumeIds; this.volumeIndexes = volumeIndexes; } /** * Get the array of blocks. * * @return array of blocks */ public long[] getBlockIds() { return blockIds; } /** * Get the list of volume identifiers in raw byte form. * * @return list of ids */ public List<byte[]> getVolumeIds() { return volumeIds; } /** * Get a list of indexes into the array of {@link VolumeId}s, one per block. * * @return list of indexes */ public List<Integer> getVolumeIndexes() { return volumeIndexes; } @Override public String toString() { return "Metadata for " + blockIds.length + " blocks in " + blockPoolId + ": " + Joiner.on(",").join(Longs.asList(blockIds)); } }
3,207
27.642857
78
java
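HdfsBlocksMetadata is a plain value object pairing block IDs with indexes into a list of volume identifiers. A construction sketch with made-up block IDs and an arbitrary block pool name:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;

public class BlocksMetadataDemo {
  public static void main(String[] args) {
    long[] blockIds = {1001L, 1002L};
    List<byte[]> volumeIds = Arrays.asList(new byte[] {0}, new byte[] {1});
    // One index per block; Integer.MAX_VALUE would mean "block not found".
    List<Integer> volumeIndexes = Arrays.asList(0, 1);

    HdfsBlocksMetadata metadata =
        new HdfsBlocksMetadata("BP-1-example", blockIds, volumeIds, volumeIndexes);
    System.out.println(metadata);   // Metadata for 2 blocks in BP-1-example: 1001,1002
  }
}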
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; /** * Indicates a failure manipulating an ACL. */ @InterfaceAudience.Private public class AclException extends IOException { private static final long serialVersionUID = 1L; /** * Creates a new AclException. * * @param message String message */ public AclException(String message) { super(message); } }
1,268
30.725
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.BatchedRemoteIterator; import org.apache.hadoop.fs.InvalidRequestException; import org.apache.hadoop.ipc.RemoteException; import org.apache.htrace.Sampler; import org.apache.htrace.Trace; import org.apache.htrace.TraceScope; import com.google.common.base.Preconditions; /** * CacheDirectiveIterator is a remote iterator that iterates cache directives. * It supports retrying in case of namenode failover. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class CacheDirectiveIterator extends BatchedRemoteIterator<Long, CacheDirectiveEntry> { private CacheDirectiveInfo filter; private final ClientProtocol namenode; private final Sampler<?> traceSampler; public CacheDirectiveIterator(ClientProtocol namenode, CacheDirectiveInfo filter, Sampler<?> traceSampler) { super(0L); this.namenode = namenode; this.filter = filter; this.traceSampler = traceSampler; } private static CacheDirectiveInfo removeIdFromFilter(CacheDirectiveInfo filter) { CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder(filter); builder.setId(null); return builder.build(); } /** * Used for compatibility when communicating with a server version that * does not support filtering directives by ID. */ private static class SingleEntry implements BatchedEntries<CacheDirectiveEntry> { private final CacheDirectiveEntry entry; public SingleEntry(final CacheDirectiveEntry entry) { this.entry = entry; } @Override public CacheDirectiveEntry get(int i) { if (i > 0) { return null; } return entry; } @Override public int size() { return 1; } @Override public boolean hasMore() { return false; } } @Override public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevKey) throws IOException { BatchedEntries<CacheDirectiveEntry> entries = null; TraceScope scope = Trace.startSpan("listCacheDirectives", traceSampler); try { entries = namenode.listCacheDirectives(prevKey, filter); } catch (IOException e) { if (e.getMessage().contains("Filtering by ID is unsupported")) { // Retry case for old servers, do the filtering client-side long id = filter.getId(); filter = removeIdFromFilter(filter); // Using id - 1 as prevId should get us a window containing the id // This is somewhat brittle, since it depends on directives being // returned in order of ascending ID. 
entries = namenode.listCacheDirectives(id - 1, filter); for (int i=0; i<entries.size(); i++) { CacheDirectiveEntry entry = entries.get(i); if (entry.getInfo().getId().equals((Long)id)) { return new SingleEntry(entry); } } throw new RemoteException(InvalidRequestException.class.getName(), "Did not find requested id " + id); } throw e; } finally { scope.close(); } Preconditions.checkNotNull(entries); return entries; } @Override public Long elementToPrevKey(CacheDirectiveEntry entry) { return entry.getInfo().getId(); } }
4,219
31.21374
83
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSConstants.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; /** * @deprecated Please use {@link HdfsConstants}. This class * is left only for other ecosystem projects which depended on * it for SafemodeAction and DatanodeReport types. */ @Deprecated public final class FSConstants extends HdfsConstants { }
1,100
38.321429
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import java.util.Comparator; import java.util.Map; import java.util.SortedSet; import java.util.TreeSet; import org.apache.hadoop.classification.InterfaceAudience; /** * This class tracks changes in the layout version of HDFS. * * Layout version is changed for following reasons: * <ol> * <li>The layout of how namenode or datanode stores information * on disk changes.</li> * <li>A new operation code is added to the editlog.</li> * <li>Modification such as format of a record, content of a record * in editlog or fsimage.</li> * </ol> * <br> * <b>How to update layout version:<br></b> * When a change requires new layout version, please add an entry into * {@link Feature} with a short enum name, new layout version and description * of the change. Please see {@link Feature} for further details. * <br> */ @InterfaceAudience.Private public class LayoutVersion { /** * Version in which HDFS-2991 was fixed. This bug caused OP_ADD to * sometimes be skipped for append() calls. If we see such a case when * loading the edits, but the version is known to have that bug, we * workaround the issue. Otherwise we should consider it a corruption * and bail. */ public static final int BUGFIX_HDFS_2991_VERSION = -40; /** * The interface to be implemented by NameNode and DataNode layout features */ public interface LayoutFeature { public FeatureInfo getInfo(); } /** * Enums for features that change the layout version before rolling * upgrade is supported. * <br><br> * To add a new layout version: * <ul> * <li>Define a new enum constant with a short enum name, the new layout version * and description of the added feature.</li> * <li>When adding a layout version with an ancestor that is not same as * its immediate predecessor, use the constructor where a specific ancestor * can be passed. 
* </li> * </ul> */ public static enum Feature implements LayoutFeature { NAMESPACE_QUOTA(-16, "Support for namespace quotas"), FILE_ACCESS_TIME(-17, "Support for access time on files"), DISKSPACE_QUOTA(-18, "Support for disk space quotas"), STICKY_BIT(-19, "Support for sticky bits"), APPEND_RBW_DIR(-20, "Datanode has \"rbw\" subdirectory for append"), ATOMIC_RENAME(-21, "Support for atomic rename"), CONCAT(-22, "Support for concat operation"), SYMLINKS(-23, "Support for symbolic links"), DELEGATION_TOKEN(-24, "Support for delegation tokens for security"), FSIMAGE_COMPRESSION(-25, "Support for fsimage compression"), FSIMAGE_CHECKSUM(-26, "Support checksum for fsimage"), REMOVE_REL13_DISK_LAYOUT_SUPPORT(-27, "Remove support for 0.13 disk layout"), EDITS_CHESKUM(-28, "Support checksum for editlog"), UNUSED(-29, "Skipped version"), FSIMAGE_NAME_OPTIMIZATION(-30, "Store only last part of path in fsimage"), RESERVED_REL20_203(-31, -19, "Reserved for release 0.20.203", true, DELEGATION_TOKEN), RESERVED_REL20_204(-32, -31, "Reserved for release 0.20.204", true), RESERVED_REL22(-33, -27, "Reserved for release 0.22", true), RESERVED_REL23(-34, -30, "Reserved for release 0.23", true), FEDERATION(-35, "Support for namenode federation"), LEASE_REASSIGNMENT(-36, "Support for persisting lease holder reassignment"), STORED_TXIDS(-37, "Transaction IDs are stored in edits log and image files"), TXID_BASED_LAYOUT(-38, "File names in NN Storage are based on transaction IDs"), EDITLOG_OP_OPTIMIZATION(-39, "Use LongWritable and ShortWritable directly instead of ArrayWritable of UTF8"), OPTIMIZE_PERSIST_BLOCKS(-40, "Serialize block lists with delta-encoded variable length ints, " + "add OP_UPDATE_BLOCKS"), RESERVED_REL1_2_0(-41, -32, "Reserved for release 1.2.0", true, CONCAT), ADD_INODE_ID(-42, -40, "Assign a unique inode id for each inode", false), SNAPSHOT(-43, "Support for snapshot feature"), RESERVED_REL1_3_0(-44, -41, "Reserved for release 1.3.0", true, ADD_INODE_ID, SNAPSHOT, FSIMAGE_NAME_OPTIMIZATION), OPTIMIZE_SNAPSHOT_INODES(-45, -43, "Reduce snapshot inode memory footprint", false), SEQUENTIAL_BLOCK_ID(-46, "Allocate block IDs sequentially and store " + "block IDs in the edits log and image files"), EDITLOG_SUPPORT_RETRYCACHE(-47, "Record ClientId and CallId in editlog to " + "enable rebuilding retry cache in case of HA failover"), EDITLOG_ADD_BLOCK(-48, "Add new editlog that only records allocation of " + "the new block instead of the entire block list"), ADD_DATANODE_AND_STORAGE_UUIDS(-49, "Replace StorageID with DatanodeUuid." + " Use distinct StorageUuid per storage directory."), ADD_LAYOUT_FLAGS(-50, "Add support for layout flags."), CACHING(-51, "Support for cache pools and path-based caching"), // Hadoop 2.4.0 PROTOBUF_FORMAT(-52, "Use protobuf to serialize FSImage"), EXTENDED_ACL(-53, "Extended ACL"), RESERVED_REL2_4_0(-54, -51, "Reserved for release 2.4.0", true, PROTOBUF_FORMAT, EXTENDED_ACL); private final FeatureInfo info; /** * Feature that is added at layout version {@code lv} - 1. * @param lv new layout version with the addition of this feature * @param description description of the feature */ Feature(final int lv, final String description) { this(lv, lv + 1, description, false); } /** * Feature that is added at layout version {@code ancestoryLV}. * @param lv new layout version with the addition of this feature * @param ancestorLV layout version from which the new lv is derived from. 
* @param description description of the feature * @param reserved true when this is a layout version reserved for previous * version * @param features set of features that are to be enabled for this version */ Feature(final int lv, final int ancestorLV, final String description, boolean reserved, Feature... features) { info = new FeatureInfo(lv, ancestorLV, description, reserved, features); } @Override public FeatureInfo getInfo() { return info; } } /** Feature information. */ public static class FeatureInfo { private final int lv; private final int ancestorLV; private final Integer minCompatLV; private final String description; private final boolean reserved; private final LayoutFeature[] specialFeatures; public FeatureInfo(final int lv, final int ancestorLV, final String description, boolean reserved, LayoutFeature... specialFeatures) { this(lv, ancestorLV, null, description, reserved, specialFeatures); } public FeatureInfo(final int lv, final int ancestorLV, Integer minCompatLV, final String description, boolean reserved, LayoutFeature... specialFeatures) { this.lv = lv; this.ancestorLV = ancestorLV; this.minCompatLV = minCompatLV; this.description = description; this.reserved = reserved; this.specialFeatures = specialFeatures; } /** * Accessor method for feature layout version * @return int lv value */ public int getLayoutVersion() { return lv; } /** * Accessor method for feature ancestor layout version * @return int ancestor LV value */ public int getAncestorLayoutVersion() { return ancestorLV; } /** * Accessor method for feature minimum compatible layout version. If the * feature does not define a minimum compatible layout version, then this * method returns the feature's own layout version. This would indicate * that the feature cannot provide compatibility with any prior layout * version. * * @return int minimum compatible LV value */ public int getMinimumCompatibleLayoutVersion() { return minCompatLV != null ? minCompatLV : lv; } /** * Accessor method for feature description * @return String feature description */ public String getDescription() { return description; } public boolean isReservedForOldRelease() { return reserved; } public LayoutFeature[] getSpecialFeatures() { return specialFeatures; } } static class LayoutFeatureComparator implements Comparator<LayoutFeature> { @Override public int compare(LayoutFeature arg0, LayoutFeature arg1) { return arg0.getInfo().getLayoutVersion() - arg1.getInfo().getLayoutVersion(); } } public static void updateMap(Map<Integer, SortedSet<LayoutFeature>> map, LayoutFeature[] features) { // Go through all the enum constants and build a map of // LayoutVersion <-> Set of all supported features in that LayoutVersion SortedSet<LayoutFeature> existingFeatures = new TreeSet<LayoutFeature>( new LayoutFeatureComparator()); for (SortedSet<LayoutFeature> s : map.values()) { existingFeatures.addAll(s); } LayoutFeature prevF = existingFeatures.isEmpty() ? null : existingFeatures.first(); for (LayoutFeature f : features) { final FeatureInfo info = f.getInfo(); int minCompatLV = info.getMinimumCompatibleLayoutVersion(); if (prevF != null && minCompatLV > prevF.getInfo().getMinimumCompatibleLayoutVersion()) { throw new AssertionError(String.format( "Features must be listed in order of minimum compatible layout " + "version. 
Check features %s and %s.", prevF, f)); } prevF = f; SortedSet<LayoutFeature> ancestorSet = map.get(info.getAncestorLayoutVersion()); if (ancestorSet == null) { // Empty set ancestorSet = new TreeSet<LayoutFeature>(new LayoutFeatureComparator()); map.put(info.getAncestorLayoutVersion(), ancestorSet); } SortedSet<LayoutFeature> featureSet = new TreeSet<LayoutFeature>(ancestorSet); if (info.getSpecialFeatures() != null) { for (LayoutFeature specialFeature : info.getSpecialFeatures()) { featureSet.add(specialFeature); } } featureSet.add(f); map.put(info.getLayoutVersion(), featureSet); } } /** * Gets formatted string that describes {@link LayoutVersion} information. */ public String getString(Map<Integer, SortedSet<LayoutFeature>> map, LayoutFeature[] values) { final StringBuilder buf = new StringBuilder(); buf.append("Feature List:\n"); for (LayoutFeature f : values) { final FeatureInfo info = f.getInfo(); buf.append(f).append(" introduced in layout version ") .append(info.getLayoutVersion()).append(" (") .append(info.getDescription()).append(")\n"); } buf.append("\n\nLayoutVersion and supported features:\n"); for (LayoutFeature f : values) { final FeatureInfo info = f.getInfo(); buf.append(info.getLayoutVersion()).append(": ") .append(map.get(info.getLayoutVersion())).append("\n"); } return buf.toString(); } /** * Returns true if a given feature is supported in the given layout version * @param map layout feature map * @param f Feature * @param lv LayoutVersion * @return true if {@code f} is supported in layout version {@code lv} */ public static boolean supports(Map<Integer, SortedSet<LayoutFeature>> map, final LayoutFeature f, final int lv) { final SortedSet<LayoutFeature> set = map.get(lv); return set != null && set.contains(f); } /** * Get the current layout version */ public static int getCurrentLayoutVersion(LayoutFeature[] features) { return getLastNonReservedFeature(features).getInfo().getLayoutVersion(); } /** * Gets the minimum compatible layout version. * * @param features all features to check * @return minimum compatible layout version */ public static int getMinimumCompatibleLayoutVersion( LayoutFeature[] features) { return getLastNonReservedFeature(features).getInfo() .getMinimumCompatibleLayoutVersion(); } static LayoutFeature getLastNonReservedFeature(LayoutFeature[] features) { for (int i = features.length -1; i >= 0; i--) { final FeatureInfo info = features[i].getInfo(); if (!info.isReservedForOldRelease()) { return features[i]; } } throw new AssertionError("All layout versions are reserved."); } }
13,320
37.836735
88
java
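The static helpers in LayoutVersion are enough to answer "does layout version X include feature Y". A small, self-contained sketch built only from the methods shown above; the commented results follow from the Feature table (SNAPSHOT was introduced at -43):

import java.util.HashMap;
import java.util.Map;
import java.util.SortedSet;

import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature;

public class LayoutVersionCheck {
  public static void main(String[] args) {
    // Build the layoutVersion -> supported-features map from the Feature enum.
    Map<Integer, SortedSet<LayoutFeature>> map =
        new HashMap<Integer, SortedSet<LayoutFeature>>();
    LayoutVersion.updateMap(map, Feature.values());

    // Layout versions are negative and decrease as features are added,
    // so a "newer" image has a more negative number.
    System.out.println("SNAPSHOT supported at -43? "
        + LayoutVersion.supports(map, Feature.SNAPSHOT, -43)); // true
    System.out.println("SNAPSHOT supported at -40? "
        + LayoutVersion.supports(map, Feature.SNAPSHOT, -40)); // false
    System.out.println("Current layout version: "
        + LayoutVersion.getCurrentLayoutVersion(Feature.values()));
  }
}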
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.BatchedRemoteIterator; import org.apache.htrace.Sampler; import org.apache.htrace.Trace; import org.apache.htrace.TraceScope; /** * EncryptionZoneIterator is a remote iterator that iterates over encryption * zones. It supports retrying in case of namenode failover. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class EncryptionZoneIterator extends BatchedRemoteIterator<Long, EncryptionZone> { private final ClientProtocol namenode; private final Sampler<?> traceSampler; public EncryptionZoneIterator(ClientProtocol namenode, Sampler<?> traceSampler) { super(Long.valueOf(0)); this.namenode = namenode; this.traceSampler = traceSampler; } @Override public BatchedEntries<EncryptionZone> makeRequest(Long prevId) throws IOException { TraceScope scope = Trace.startSpan("listEncryptionZones", traceSampler); try { return namenode.listEncryptionZones(prevId); } finally { scope.close(); } } @Override public Long elementToPrevKey(EncryptionZone entry) { return entry.getId(); } }
2,139
31.923077
76
java
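Client code normally drives EncryptionZoneIterator through HdfsAdmin#listEncryptionZones. A minimal sketch, assuming a reachable namenode; the hdfs://nn:8020 URI is a placeholder:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;

public class ListEncryptionZonesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(new URI("hdfs://nn:8020"), conf);

    // listEncryptionZones() is backed by an EncryptionZoneIterator, which
    // fetches zones in batches keyed by the last zone id seen (prevId).
    RemoteIterator<EncryptionZone> it = admin.listEncryptionZones();
    while (it.hasNext()) {
      EncryptionZone zone = it.next();
      System.out.println(zone.getPath() + " (key: " + zone.getKeyName() + ")");
    }
  }
}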
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnresolvedPathException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocol;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.Path;

/**
 * Thrown when a symbolic link is encountered in a path.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public final class UnresolvedPathException extends UnresolvedLinkException {
  private static final long serialVersionUID = 1L;
  private String path;        // The path containing the link
  private String preceding;   // The path part preceding the link
  private String remainder;   // The path part following the link
  private String linkTarget;  // The link's target

  /**
   * Used by RemoteException to instantiate an UnresolvedPathException.
   */
  public UnresolvedPathException(String msg) {
    super(msg);
  }

  public UnresolvedPathException(String path, String preceding,
      String remainder, String linkTarget) {
    this.path = path;
    this.preceding = preceding;
    this.remainder = remainder;
    this.linkTarget = linkTarget;
  }

  /**
   * Return a path with the link resolved with the target.
   */
  public Path getResolvedPath() throws IOException {
    // If the path is absolute we can throw out the preceding part and
    // just append the remainder to the target, otherwise append each
    // piece to resolve the link in path.
    boolean noRemainder = (remainder == null || "".equals(remainder));
    Path target = new Path(linkTarget);
    if (target.isUriPathAbsolute()) {
      return noRemainder ? target : new Path(target, remainder);
    } else {
      return noRemainder
        ? new Path(preceding, target)
        : new Path(new Path(preceding, linkTarget), remainder);
    }
  }

  @Override
  public String getMessage() {
    String msg = super.getMessage();
    if (msg != null) {
      return msg;
    }
    String myMsg = "Unresolved path " + path;
    try {
      return getResolvedPath().toString();
    } catch (IOException e) {
      // Ignore
    }
    return myMsg;
  }
}
2,944
32.465909
76
java
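The resolution rule in getResolvedPath is easiest to see with concrete values. A tiny example using made-up paths (a symlink /data/link pointing at the absolute target /archive):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;

public class ResolveSymlinkExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical layout: /data/link is a symlink to /archive, and the
    // client originally asked for /data/link/part-0000.
    UnresolvedPathException e = new UnresolvedPathException(
        "/data/link/part-0000", // full path that contained the link
        "/data",                // path part preceding the link component
        "part-0000",            // path part following the link component
        "/archive");            // the link's target

    // The target is absolute, so the preceding part is dropped and the
    // remainder is appended to the target.
    Path resolved = e.getResolvedPath();
    System.out.println(resolved); // prints /archive/part-0000
  }
}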
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolIterator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.BatchedRemoteIterator; import org.apache.htrace.Sampler; import org.apache.htrace.Trace; import org.apache.htrace.TraceScope; /** * CachePoolIterator is a remote iterator that iterates cache pools. * It supports retrying in case of namenode failover. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class CachePoolIterator extends BatchedRemoteIterator<String, CachePoolEntry> { private final ClientProtocol namenode; private final Sampler traceSampler; public CachePoolIterator(ClientProtocol namenode, Sampler traceSampler) { super(""); this.namenode = namenode; this.traceSampler = traceSampler; } @Override public BatchedEntries<CachePoolEntry> makeRequest(String prevKey) throws IOException { TraceScope scope = Trace.startSpan("listCachePools", traceSampler); try { return namenode.listCachePools(prevKey); } finally { scope.close(); } } @Override public String elementToPrevKey(CachePoolEntry entry) { return entry.getInfo().getPoolName(); } }
2,077
31.46875
75
java
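As with cache directives, applications list pools through DistributedFileSystem rather than instantiating CachePoolIterator directly; batching here is keyed by the last pool name seen, starting from the empty string. A short sketch, assuming a configured cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;

public class ListCachePoolsExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());

    // Backed by CachePoolIterator: each batch request passes the name of the
    // last pool returned as the prevKey.
    RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
    while (pools.hasNext()) {
      CachePoolEntry entry = pools.next();
      System.out.println(entry.getInfo().getPoolName()
          + " limit=" + entry.getInfo().getLimit());
    }
  }
}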
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.Replica; import com.google.common.base.Preconditions; import com.google.protobuf.ByteString; import com.google.protobuf.CodedInputStream; import com.google.protobuf.CodedOutputStream; import com.google.protobuf.WireFormat; @InterfaceAudience.Private @InterfaceStability.Evolving public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> { private final static int CHUNK_SIZE = 64*1024; // 64K private static long[] EMPTY_LONGS = new long[]{0, 0}; public static BlockListAsLongs EMPTY = new BlockListAsLongs() { @Override public int getNumberOfBlocks() { return 0; } @Override public ByteString getBlocksBuffer() { return ByteString.EMPTY; } @Override public long[] getBlockListAsLongs() { return EMPTY_LONGS; } @Override public Iterator<BlockReportReplica> iterator() { return Collections.emptyIterator(); } }; /** * Prepare an instance to in-place decode the given ByteString buffer * @param numBlocks - blocks in the buffer * @param blocksBuf - ByteString encoded varints * @return BlockListAsLongs */ public static BlockListAsLongs decodeBuffer(final int numBlocks, final ByteString blocksBuf) { return new BufferDecoder(numBlocks, blocksBuf); } /** * Prepare an instance to in-place decode the given ByteString buffers * @param numBlocks - blocks in the buffers * @param blocksBufs - list of ByteString encoded varints * @return BlockListAsLongs */ public static BlockListAsLongs decodeBuffers(final int numBlocks, final List<ByteString> blocksBufs) { // this doesn't actually copy the data return decodeBuffer(numBlocks, ByteString.copyFrom(blocksBufs)); } /** * Prepare an instance to in-place decode the given list of Longs. Note * it's much more efficient to decode ByteString buffers and only exists * for compatibility. * @param blocksList - list of longs * @return BlockListAsLongs */ public static BlockListAsLongs decodeLongs(List<Long> blocksList) { return blocksList.isEmpty() ? EMPTY : new LongsDecoder(blocksList); } /** * Prepare an instance to encode the collection of replicas into an * efficient ByteString. * @param replicas - replicas to encode * @return BlockListAsLongs */ public static BlockListAsLongs encode( final Collection<? 
extends Replica> replicas) { BlockListAsLongs.Builder builder = builder(); for (Replica replica : replicas) { builder.add(replica); } return builder.build(); } public static BlockListAsLongs readFrom(InputStream is) throws IOException { CodedInputStream cis = CodedInputStream.newInstance(is); int numBlocks = -1; ByteString blocksBuf = null; while (!cis.isAtEnd()) { int tag = cis.readTag(); int field = WireFormat.getTagFieldNumber(tag); switch(field) { case 0: break; case 1: numBlocks = (int)cis.readInt32(); break; case 2: blocksBuf = cis.readBytes(); break; default: cis.skipField(tag); break; } } if (numBlocks != -1 && blocksBuf != null) { return decodeBuffer(numBlocks, blocksBuf); } return null; } public void writeTo(OutputStream os) throws IOException { CodedOutputStream cos = CodedOutputStream.newInstance(os); cos.writeInt32(1, getNumberOfBlocks()); cos.writeBytes(2, getBlocksBuffer()); cos.flush(); } public static Builder builder() { return new BlockListAsLongs.Builder(); } /** * The number of blocks * @return - the number of blocks */ abstract public int getNumberOfBlocks(); /** * Very efficient encoding of the block report into a ByteString to avoid * the overhead of protobuf repeating fields. Primitive repeating fields * require re-allocs of an ArrayList<Long> and the associated (un)boxing * overhead which puts pressure on GC. * * The structure of the buffer is as follows: * - each replica is represented by 4 longs: * blockId, block length, genstamp, replica state * * @return ByteString encoded block report */ abstract public ByteString getBlocksBuffer(); /** * List of ByteStrings that encode this block report * * @return ByteStrings */ public List<ByteString> getBlocksBuffers() { final ByteString blocksBuf = getBlocksBuffer(); final List<ByteString> buffers; final int size = blocksBuf.size(); if (size <= CHUNK_SIZE) { buffers = Collections.singletonList(blocksBuf); } else { buffers = new ArrayList<ByteString>(); for (int pos=0; pos < size; pos += CHUNK_SIZE) { // this doesn't actually copy the data buffers.add(blocksBuf.substring(pos, Math.min(pos+CHUNK_SIZE, size))); } } return buffers; } /** * Convert block report to old-style list of longs. Only used to * re-encode the block report when the DN detects an older NN. This is * inefficient, but in practice a DN is unlikely to be upgraded first * * The structure of the array is as follows: * 0: the length of the finalized replica list; * 1: the length of the under-construction replica list; * - followed by finalized replica list where each replica is represented by * 3 longs: one for the blockId, one for the block length, and one for * the generation stamp; * - followed by the invalid replica represented with three -1s; * - followed by the under-construction replica list where each replica is * represented by 4 longs: three for the block id, length, generation * stamp, and the fourth for the replica state. * @return list of longs */ abstract public long[] getBlockListAsLongs(); /** * Returns a singleton iterator over blocks in the block report. Do not * add the returned blocks to a collection. 
* @return Iterator */ abstract public Iterator<BlockReportReplica> iterator(); public static class Builder { private final ByteString.Output out; private final CodedOutputStream cos; private int numBlocks = 0; private int numFinalized = 0; Builder() { out = ByteString.newOutput(64*1024); cos = CodedOutputStream.newInstance(out); } public void add(Replica replica) { try { // zig-zag to reduce size of legacy blocks cos.writeSInt64NoTag(replica.getBlockId()); cos.writeRawVarint64(replica.getBytesOnDisk()); cos.writeRawVarint64(replica.getGenerationStamp()); ReplicaState state = replica.getState(); // although state is not a 64-bit value, using a long varint to // allow for future use of the upper bits cos.writeRawVarint64(state.getValue()); if (state == ReplicaState.FINALIZED) { numFinalized++; } numBlocks++; } catch (IOException ioe) { // shouldn't happen, ByteString.Output doesn't throw IOE throw new IllegalStateException(ioe); } } public int getNumberOfBlocks() { return numBlocks; } public BlockListAsLongs build() { try { cos.flush(); } catch (IOException ioe) { // shouldn't happen, ByteString.Output doesn't throw IOE throw new IllegalStateException(ioe); } return new BufferDecoder(numBlocks, numFinalized, out.toByteString()); } } // decode new-style ByteString buffer based block report private static class BufferDecoder extends BlockListAsLongs { // reserve upper bits for future use. decoding masks off these bits to // allow compatibility for the current through future release that may // start using the bits private static long NUM_BYTES_MASK = (-1L) >>> (64 - 48); private static long REPLICA_STATE_MASK = (-1L) >>> (64 - 4); private final ByteString buffer; private final int numBlocks; private int numFinalized; BufferDecoder(final int numBlocks, final ByteString buf) { this(numBlocks, -1, buf); } BufferDecoder(final int numBlocks, final int numFinalized, final ByteString buf) { this.numBlocks = numBlocks; this.numFinalized = numFinalized; this.buffer = buf; } @Override public int getNumberOfBlocks() { return numBlocks; } @Override public ByteString getBlocksBuffer() { return buffer; } @Override public long[] getBlockListAsLongs() { // terribly inefficient but only occurs if server tries to transcode // an undecoded buffer into longs - ie. 
it will never happen but let's // handle it anyway if (numFinalized == -1) { int n = 0; for (Replica replica : this) { if (replica.getState() == ReplicaState.FINALIZED) { n++; } } numFinalized = n; } int numUc = numBlocks - numFinalized; int size = 2 + 3*(numFinalized+1) + 4*(numUc); long[] longs = new long[size]; longs[0] = numFinalized; longs[1] = numUc; int idx = 2; int ucIdx = idx + 3*numFinalized; // delimiter block longs[ucIdx++] = -1; longs[ucIdx++] = -1; longs[ucIdx++] = -1; for (BlockReportReplica block : this) { switch (block.getState()) { case FINALIZED: { longs[idx++] = block.getBlockId(); longs[idx++] = block.getNumBytes(); longs[idx++] = block.getGenerationStamp(); break; } default: { longs[ucIdx++] = block.getBlockId(); longs[ucIdx++] = block.getNumBytes(); longs[ucIdx++] = block.getGenerationStamp(); longs[ucIdx++] = block.getState().getValue(); break; } } } return longs; } @Override public Iterator<BlockReportReplica> iterator() { return new Iterator<BlockReportReplica>() { final BlockReportReplica block = new BlockReportReplica(); final CodedInputStream cis = buffer.newCodedInput(); private int currentBlockIndex = 0; @Override public boolean hasNext() { return currentBlockIndex < numBlocks; } @Override public BlockReportReplica next() { currentBlockIndex++; try { // zig-zag to reduce size of legacy blocks and mask off bits // we don't (yet) understand block.setBlockId(cis.readSInt64()); block.setNumBytes(cis.readRawVarint64() & NUM_BYTES_MASK); block.setGenerationStamp(cis.readRawVarint64()); long state = cis.readRawVarint64() & REPLICA_STATE_MASK; block.setState(ReplicaState.getState((int)state)); } catch (IOException e) { throw new IllegalStateException(e); } return block; } @Override public void remove() { throw new UnsupportedOperationException(); } }; } } // decode old style block report of longs private static class LongsDecoder extends BlockListAsLongs { private final List<Long> values; private final int finalizedBlocks; private final int numBlocks; // set the header LongsDecoder(List<Long> values) { this.values = values.subList(2, values.size()); this.finalizedBlocks = values.get(0).intValue(); this.numBlocks = finalizedBlocks + values.get(1).intValue(); } @Override public int getNumberOfBlocks() { return numBlocks; } @Override public ByteString getBlocksBuffer() { Builder builder = builder(); for (Replica replica : this) { builder.add(replica); } return builder.build().getBlocksBuffer(); } @Override public long[] getBlockListAsLongs() { long[] longs = new long[2+values.size()]; longs[0] = finalizedBlocks; longs[1] = numBlocks - finalizedBlocks; for (int i=0; i < longs.length; i++) { longs[i] = values.get(i); } return longs; } @Override public Iterator<BlockReportReplica> iterator() { return new Iterator<BlockReportReplica>() { private final BlockReportReplica block = new BlockReportReplica(); final Iterator<Long> iter = values.iterator(); private int currentBlockIndex = 0; @Override public boolean hasNext() { return currentBlockIndex < numBlocks; } @Override public BlockReportReplica next() { if (currentBlockIndex == finalizedBlocks) { // verify the presence of the delimiter block readBlock(); Preconditions.checkArgument(block.getBlockId() == -1 && block.getNumBytes() == -1 && block.getGenerationStamp() == -1, "Invalid delimiter block"); } readBlock(); if (currentBlockIndex++ < finalizedBlocks) { block.setState(ReplicaState.FINALIZED); } else { block.setState(ReplicaState.getState(iter.next().intValue())); } return block; } private void readBlock() { 
block.setBlockId(iter.next()); block.setNumBytes(iter.next()); block.setGenerationStamp(iter.next()); } @Override public void remove() { throw new UnsupportedOperationException(); } }; } } @InterfaceAudience.Private public static class BlockReportReplica extends Block implements Replica { private ReplicaState state; private BlockReportReplica() { } public BlockReportReplica(Block block) { super(block); if (block instanceof BlockReportReplica) { this.state = ((BlockReportReplica)block).getState(); } else { this.state = ReplicaState.FINALIZED; } } public void setState(ReplicaState state) { this.state = state; } @Override public ReplicaState getState() { return state; } @Override public long getBytesOnDisk() { return getNumBytes(); } @Override public long getVisibleLength() { throw new UnsupportedOperationException(); } @Override public String getStorageUuid() { throw new UnsupportedOperationException(); } @Override public boolean isOnTransientStorage() { throw new UnsupportedOperationException(); } @Override public boolean equals(Object o) { return super.equals(o); } @Override public int hashCode() { return super.hashCode(); } } }
16,157
30.558594
80
java
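A short round trip through the two encodings supported above: the builder produces the compact varint ByteString form, and decodeLongs consumes the legacy long[] layout described in the getBlockListAsLongs javadoc. Block id, length and genstamp values are made up:

import java.util.Arrays;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;

public class BlockReportEncodingExample {
  public static void main(String[] args) {
    // Encode one finalized replica into the ByteString-backed form.
    BlockListAsLongs.Builder builder = BlockListAsLongs.builder();
    builder.add(new BlockReportReplica(new Block(1000L, 134217728L, 1001L)));
    BlockListAsLongs report = builder.build();
    System.out.println("blocks=" + report.getNumberOfBlocks()
        + " bufferBytes=" + report.getBlocksBuffer().size());

    // Decode the legacy long[] form: {numFinalized, numUC,
    //   <finalized triples>, -1,-1,-1 (delimiter), <UC 4-tuples>}.
    BlockListAsLongs legacy = BlockListAsLongs.decodeLongs(
        Arrays.asList(1L, 0L, 1000L, 134217728L, 1001L, -1L, -1L, -1L));
    for (BlockReportReplica r : legacy) {
      System.out.println(r.getBlockId() + " state=" + r.getState());
    }
  }
}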
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockLocalPathInfo.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocol;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * A block and the full path information to the block data file and
 * the metadata file stored on the local file system.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class BlockLocalPathInfo {
  private final ExtendedBlock block;
  private String localBlockPath = "";  // local file storing the data
  private String localMetaPath = "";   // local file storing the checksum

  /**
   * Constructs BlockLocalPathInfo.
   * @param b The block corresponding to this block path info.
   * @param file Block data file.
   * @param metafile Metadata file for the block.
   */
  public BlockLocalPathInfo(ExtendedBlock b, String file, String metafile) {
    block = b;
    localBlockPath = file;
    localMetaPath = metafile;
  }

  /**
   * Get the Block data file.
   * @return Block data file.
   */
  public String getBlockPath() {return localBlockPath;}

  /**
   * @return the Block
   */
  public ExtendedBlock getBlock() { return block;}

  /**
   * Get the Block metadata file.
   * @return Block metadata file.
   */
  public String getMetaPath() {return localMetaPath;}

  /**
   * Get number of bytes in the block.
   * @return Number of bytes in the block.
   */
  public long getNumBytes() {
    return block.getNumBytes();
  }
}
2,237
30.521127
76
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import java.net.URI; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSUtilClient; /** * Interface that represents the over the wire information * including block locations for a file. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class HdfsLocatedFileStatus extends HdfsFileStatus { private final LocatedBlocks locations; /** * Constructor * * @param length size * @param isdir if this is directory * @param block_replication the file's replication factor * @param blocksize the file's block size * @param modification_time most recent modification time * @param access_time most recent access time * @param permission permission * @param owner owner * @param group group * @param symlink symbolic link * @param path local path name in java UTF8 format * @param fileId the file id * @param locations block locations * @param feInfo file encryption info */ public HdfsLocatedFileStatus(long length, boolean isdir, int block_replication, long blocksize, long modification_time, long access_time, FsPermission permission, String owner, String group, byte[] symlink, byte[] path, long fileId, LocatedBlocks locations, int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy) { super(length, isdir, block_replication, blocksize, modification_time, access_time, permission, owner, group, symlink, path, fileId, childrenNum, feInfo, storagePolicy); this.locations = locations; } public LocatedBlocks getBlockLocations() { return locations; } public final LocatedFileStatus makeQualifiedLocated(URI defaultUri, Path path) { return new LocatedFileStatus(getLen(), isDir(), getReplication(), getBlockSize(), getModificationTime(), getAccessTime(), getPermission(), getOwner(), getGroup(), isSymlink() ? new Path(getSymlink()) : null, (getFullPath(path)).makeQualified( defaultUri, null), // fully-qualify path DFSUtilClient.locatedBlocks2Locations(getBlockLocations())); } }
3,226
37.416667
76
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; /** * SnapshotInfo maintains information for a snapshot */ @InterfaceAudience.Private @InterfaceStability.Evolving public class SnapshotInfo { private final String snapshotName; private final String snapshotRoot; private final String createTime; private final FsPermissionProto permission; private final String owner; private final String group; public SnapshotInfo(String sname, String sroot, String ctime, FsPermissionProto permission, String owner, String group) { this.snapshotName = sname; this.snapshotRoot = sroot; this.createTime = ctime; this.permission = permission; this.owner = owner; this.group = group; } final public String getSnapshotName() { return snapshotName; } final public String getSnapshotRoot() { return snapshotRoot; } final public String getCreateTime() { return createTime; } final public FsPermissionProto getPermission() { return permission; } final public String getOwner() { return owner; } final public String getGroup() { return group; } @Override public String toString() { return getClass().getSimpleName() + "{snapshotName=" + snapshotName + "; snapshotRoot=" + snapshotRoot + "; createTime=" + createTime + "; permission=" + permission + "; owner=" + owner + "; group=" + group + "}"; } public static class Bean { private final String snapshotID; private final String snapshotDirectory; private final long modificationTime; public Bean(String snapshotID, String snapshotDirectory, long modificationTime) { this.snapshotID = snapshotID; this.snapshotDirectory = snapshotDirectory; this.modificationTime = modificationTime; } public String getSnapshotID() { return snapshotID; } public String getSnapshotDirectory() { return snapshotDirectory; } public long getModificationTime() { return modificationTime; } } }
3,133
27.752294
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.server.protocol.JournalInfo; import org.apache.hadoop.hdfs.server.protocol.NodeRegistration; /** * This exception is thrown when a node that has not previously * registered is trying to access the name node. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class UnregisteredNodeException extends IOException { private static final long serialVersionUID = -5620209396945970810L; public UnregisteredNodeException(JournalInfo info) { super("Unregistered server: " + info.toString()); } public UnregisteredNodeException(NodeRegistration nodeReg) { super("Unregistered server: " + nodeReg.toString()); } /** * The exception is thrown if a different data-node claims the same * storage id as the existing one. * * @param nodeID unregistered data-node * @param storedNode data-node stored in the system with this storage id */ public UnregisteredNodeException(DatanodeID nodeID, DatanodeInfo storedNode) { super("Data node " + nodeID + " is attempting to report storage ID " + nodeID.getDatanodeUuid() + ". Node " + storedNode + " is expected to serve this storage."); } }
2,191
36.793103
80
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FSLimitException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @InterfaceAudience.Private @InterfaceStability.Evolving /** * Abstract class for deriving exceptions related to filesystem constraints */ public abstract class FSLimitException extends QuotaExceededException { protected static final long serialVersionUID = 1L; protected FSLimitException() {} protected FSLimitException(String msg) { super(msg); } protected FSLimitException(long quota, long count) { super(quota, count); } /** * Path component length is too long */ public static final class PathComponentTooLongException extends FSLimitException { protected static final long serialVersionUID = 1L; private String childName; protected PathComponentTooLongException() {} protected PathComponentTooLongException(String msg) { super(msg); } public PathComponentTooLongException(long quota, long count, String parentPath, String childName) { super(quota, count); setPathName(parentPath); this.childName = childName; } String getParentPath() { return pathName; } @Override public String getMessage() { return "The maximum path component name limit of " + childName + " in directory " + getParentPath() + " is exceeded: limit=" + quota + " length=" + count; } } /** * Directory has too many items */ public static final class MaxDirectoryItemsExceededException extends FSLimitException { protected static final long serialVersionUID = 1L; protected MaxDirectoryItemsExceededException() {} protected MaxDirectoryItemsExceededException(String msg) { super(msg); } public MaxDirectoryItemsExceededException(long quota, long count) { super(quota, count); } @Override public String getMessage() { return "The directory item limit of " + pathName + " is exceeded: limit=" + quota + " items=" + count; } } }
2,909
27.811881
75
java
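The exceptions above mostly differ in how they render getMessage(). For example, PathComponentTooLongException (thrown when a single name exceeds dfs.namenode.fs-limits.max-component-length) formats the offending component, its parent directory, and the limit; the names and numbers below are illustrative:

import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;

public class FsLimitMessageExample {
  public static void main(String[] args) {
    PathComponentTooLongException e =
        new PathComponentTooLongException(255, 300, "/user/alice", "a-very-long-name");
    System.out.println(e.getMessage());
    // The maximum path component name limit of a-very-long-name in
    // directory /user/alice is exceeded: limit=255 length=300
  }
}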
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaByStorageTypeExceededException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.StorageType; import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2String; @InterfaceAudience.Private @InterfaceStability.Evolving public class QuotaByStorageTypeExceededException extends QuotaExceededException { protected static final long serialVersionUID = 1L; protected StorageType type; public QuotaByStorageTypeExceededException() {} public QuotaByStorageTypeExceededException(String msg) { super(msg); } public QuotaByStorageTypeExceededException(long quota, long count, StorageType type) { super(quota, count); this.type = type; } @Override public String getMessage() { String msg = super.getMessage(); if (msg == null) { return "Quota by storage type : " + type.toString() + " on path : " + (pathName==null ? "": pathName) + " is exceeded. quota = " + long2String(quota, "B", 2) + " but space consumed = " + long2String(count, "B", 2); } else { return msg; } } }
1,990
33.929825
88
java
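QuotaByStorageTypeExceededException additionally carries the StorageType and renders both sizes with binary prefixes via long2String. An illustrative construction (a 10 GB SSD quota with roughly 12 GB consumed; the figures are made up):

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;

public class StorageTypeQuotaMessage {
  public static void main(String[] args) {
    QuotaByStorageTypeExceededException e = new QuotaByStorageTypeExceededException(
        10L * 1024 * 1024 * 1024,   // quota
        12L * 1024 * 1024 * 1024,   // space consumed
        StorageType.SSD);
    // Both values are rendered with long2String(..., "B", 2); the path is
    // empty here because setPathName() was never called.
    System.out.println(e.getMessage());
  }
}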
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import java.io.IOException; /** Snapshot related exception. */ public class SnapshotException extends IOException { private static final long serialVersionUID = 1L; public SnapshotException(final String message) { super(message); } public SnapshotException(final Throwable cause) { super(cause); } }
1,170
33.441176
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import java.io.IOException; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.ReconfigurationTaskStatus; import org.apache.hadoop.hdfs.client.BlockReportOptions; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenInfo; /** An client-datanode protocol for block recovery */ @InterfaceAudience.Private @InterfaceStability.Evolving @KerberosInfo( serverPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY) @TokenInfo(BlockTokenSelector.class) public interface ClientDatanodeProtocol { /** * Until version 9, this class ClientDatanodeProtocol served as both * the client interface to the DN AND the RPC protocol used to * communicate with the NN. * * This class is used by both the DFSClient and the * DN server side to insulate from the protocol serialization. * * If you are adding/changing DN's interface then you need to * change both this class and ALSO related protocol buffer * wire protocol definition in ClientDatanodeProtocol.proto. * * For more details on protocol buffer wire protocol, please see * .../org/apache/hadoop/hdfs/protocolPB/overview.html * * The log of historical changes can be retrieved from the svn). * 9: Added deleteBlockPool method * * 9 is the last version id when this class was used for protocols * serialization. DO not update this version any further. */ public static final long versionID = 9L; /** Return the visible length of a replica. */ long getReplicaVisibleLength(ExtendedBlock b) throws IOException; /** * Refresh the list of federated namenodes from updated configuration * Adds new namenodes and stops the deleted namenodes. * * @throws IOException on error **/ void refreshNamenodes() throws IOException; /** * Delete the block pool directory. If force is false it is deleted only if * it is empty, otherwise it is deleted along with its contents. * * @param bpid Blockpool id to be deleted. * @param force If false blockpool directory is deleted only if it is empty * i.e. if it doesn't contain any block files, otherwise it is * deleted along with its contents. * @throws IOException */ void deleteBlockPool(String bpid, boolean force) throws IOException; /** * Retrieves the path names of the block file and metadata file stored on the * local file system. 
* * In order for this method to work, one of the following should be satisfied: * <ul> * <li> * The client user must be configured at the datanode to be able to use this * method.</li> * <li> * When security is enabled, kerberos authentication must be used to connect * to the datanode.</li> * </ul> * * @param block * the specified block on the local datanode * @param token * the block access token. * @return the BlockLocalPathInfo of a block * @throws IOException * on error */ BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block, Token<BlockTokenIdentifier> token) throws IOException; /** * Retrieves volume location information about a list of blocks on a datanode. * This is in the form of an opaque {@link org.apache.hadoop.fs.VolumeId} * for each configured data directory, which is not guaranteed to be * the same across DN restarts. * * @param blockPoolId the pool to query * @param blockIds * list of blocks on the local datanode * @param tokens * block access tokens corresponding to the requested blocks * @return an HdfsBlocksMetadata that associates {@link ExtendedBlock}s with * data directories * @throws IOException * if datanode is unreachable, or replica is not found on datanode */ HdfsBlocksMetadata getHdfsBlocksMetadata(String blockPoolId, long []blockIds, List<Token<BlockTokenIdentifier>> tokens) throws IOException; /** * Shuts down a datanode. * * @param forUpgrade If true, data node does extra prep work before shutting * down. The work includes advising clients to wait and saving * certain states for quick restart. This should only be used when * the stored data will remain the same during upgrade/restart. * @throws IOException */ void shutdownDatanode(boolean forUpgrade) throws IOException; /** * Obtains datanode info * * @return software/config version and uptime of the datanode */ DatanodeLocalInfo getDatanodeInfo() throws IOException; /** * Asynchronously reload configuration on disk and apply changes. */ void startReconfiguration() throws IOException; /** * Get the status of the previously issued reconfig task. * @see {@link org.apache.hadoop.conf.ReconfigurationTaskStatus}. */ ReconfigurationTaskStatus getReconfigurationStatus() throws IOException; /** * Get a list of allowed properties for reconfiguration. */ List<String> listReconfigurableProperties() throws IOException; /** * Trigger a new block report. */ void triggerBlockReport(BlockReportOptions options) throws IOException; }
6,488
36.293103
85
java
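A hedged sketch of driving this interface: it assumes the caller already holds a ClientDatanodeProtocol proxy (DFSAdmin builds one from a DatanodeID and Configuration; that plumbing is omitted here) and only exercises methods declared above:

import java.io.IOException;

import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;

public class DatanodeAdminSketch {
  /** Assumes the caller already built a proxy the way DFSAdmin does. */
  static void pokeDatanode(ClientDatanodeProtocol dn) throws IOException {
    DatanodeLocalInfo info = dn.getDatanodeInfo();
    System.out.println("datanode version: " + info.getSoftwareVersion()
        + ", uptime(s): " + info.getUptime());

    // Ask the DN to send a full (non-incremental) block report to its namenodes.
    dn.triggerBlockReport(
        new BlockReportOptions.Factory().setIncremental(false).build());
  }
}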
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; /** * LayoutFlags represent features which the FSImage and edit logs can either * support or not, independently of layout version. * * Note: all flags starting with 'test' are reserved for unit test purposes. */ @InterfaceAudience.Private public class LayoutFlags { /** * Load a LayoutFlags object from a stream. * * @param in The stream to read from. * @throws IOException */ public static LayoutFlags read(DataInputStream in) throws IOException { int length = in.readInt(); if (length < 0) { throw new IOException("The length of the feature flag section " + "was negative at " + length + " bytes."); } else if (length > 0) { throw new IOException("Found feature flags which we can't handle. " + "Please upgrade your software."); } return new LayoutFlags(); } private LayoutFlags() { } public static void write(DataOutputStream out) throws IOException { out.writeInt(0); } }
2,159
32.230769
76
java
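LayoutFlags above serializes nothing but a length-prefixed, currently empty feature set, so a tiny round trip makes the wire behaviour concrete. This is a minimal sketch using only the read/write methods shown above; the demo class name is made up.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.LayoutFlags;

public class LayoutFlagsRoundTrip {
  public static void main(String[] args) throws IOException {
    // Write the (currently empty) feature-flag section: a single int 0.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    LayoutFlags.write(new DataOutputStream(bytes));

    // Reading it back succeeds; a positive length would be rejected as an
    // unsupported feature set, and a negative length as corruption.
    LayoutFlags.read(new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println("feature flag section round-tripped OK");
  }
}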
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RecoveryInProgressException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocol;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * Exception indicating that a replica is already being recovered.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class RecoveryInProgressException extends IOException {
  private static final long serialVersionUID = 1L;

  public RecoveryInProgressException(String msg) {
    super(msg);
  }
}
1,315
35.555556
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/RollingUpgradeException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Exception related to rolling upgrade. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class RollingUpgradeException extends IOException { private static final long serialVersionUID = 1L; public RollingUpgradeException(String msg) { super(msg); } }
1,282
34.638889
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer; import java.io.InputStream; import java.io.OutputStream; import org.apache.hadoop.classification.InterfaceAudience; /** * A little struct class to wrap an InputStream and an OutputStream. */ @InterfaceAudience.Private public class IOStreamPair { public final InputStream in; public final OutputStream out; public IOStreamPair(InputStream in, OutputStream out) { this.in = in; this.out = out; } }
1,280
33.621622
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer; import java.net.InetAddress; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.util.ReflectionUtils; /** * Class used to indicate whether a channel is trusted or not. * The default implementation is to return false indicating that * the channel is not trusted. * This class can be overridden to provide custom logic to determine * whether a channel is trusted or not. * The custom class can be specified via configuration. * */ public class TrustedChannelResolver implements Configurable { Configuration conf; /** * Returns an instance of TrustedChannelResolver. * Looks up the configuration to see if there is custom class specified. * @param conf * @return TrustedChannelResolver */ public static TrustedChannelResolver getInstance(Configuration conf) { Class<? extends TrustedChannelResolver> clazz = conf.getClass( DFSConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS, TrustedChannelResolver.class, TrustedChannelResolver.class); return ReflectionUtils.newInstance(clazz, conf); } @Override public void setConf(Configuration conf) { this.conf = conf; } @Override public Configuration getConf() { return conf; } /** * Return boolean value indicating whether a channel is trusted or not * from a client's perspective. * @return true if the channel is trusted and false otherwise. */ public boolean isTrusted() { return false; } /** * Identify boolean value indicating whether a channel is trusted or not. * @param peerAddress address of the peer * @return true if the channel is trusted and false otherwise. */ public boolean isTrusted(InetAddress peerAddress) { return false; } }
2,687
31.780488
75
java
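TrustedChannelResolver above is designed to be subclassed and selected through configuration. The sketch below, assuming getInstance() and the DFS_TRUSTEDCHANNEL_RESOLVER_CLASS key behave as shown above, wires in a hypothetical resolver that trusts only loopback peers.

import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;

public class LoopbackResolverDemo {
  /** Trusts only loopback peers; all other channels stay untrusted. */
  public static class LoopbackOnlyResolver extends TrustedChannelResolver {
    @Override
    public boolean isTrusted() {
      return true;  // the local client end is considered trusted here
    }
    @Override
    public boolean isTrusted(InetAddress peerAddress) {
      return peerAddress.isLoopbackAddress();
    }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Point the resolver class key at the custom implementation.
    conf.setClass(DFSConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS,
        LoopbackOnlyResolver.class, TrustedChannelResolver.class);

    TrustedChannelResolver resolver = TrustedChannelResolver.getInstance(conf);
    System.out.println(resolver.isTrusted(InetAddress.getLoopbackAddress()));
  }
}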
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; import org.apache.htrace.Span; import org.apache.htrace.Trace; import org.apache.htrace.TraceInfo; import org.apache.htrace.TraceScope; /** * Static utilities for dealing with the protocol buffers used by the * Data Transfer Protocol. 
*/ @InterfaceAudience.Private @InterfaceStability.Evolving public abstract class DataTransferProtoUtil { static BlockConstructionStage fromProto( OpWriteBlockProto.BlockConstructionStage stage) { return BlockConstructionStage.valueOf(stage.name()); } static OpWriteBlockProto.BlockConstructionStage toProto( BlockConstructionStage stage) { return OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()); } public static ChecksumProto toProto(DataChecksum checksum) { ChecksumTypeProto type = PBHelper.convert(checksum.getChecksumType()); // ChecksumType#valueOf never returns null return ChecksumProto.newBuilder() .setBytesPerChecksum(checksum.getBytesPerChecksum()) .setType(type) .build(); } public static DataChecksum fromProto(ChecksumProto proto) { if (proto == null) return null; int bytesPerChecksum = proto.getBytesPerChecksum(); DataChecksum.Type type = PBHelper.convert(proto.getType()); return DataChecksum.newDataChecksum(type, bytesPerChecksum); } static ClientOperationHeaderProto buildClientHeader(ExtendedBlock blk, String client, Token<BlockTokenIdentifier> blockToken) { ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder() .setBaseHeader(buildBaseHeader(blk, blockToken)) .setClientName(client) .build(); return header; } static BaseHeaderProto buildBaseHeader(ExtendedBlock blk, Token<BlockTokenIdentifier> blockToken) { BaseHeaderProto.Builder builder = BaseHeaderProto.newBuilder() .setBlock(PBHelper.convert(blk)) .setToken(PBHelper.convert(blockToken)); if (Trace.isTracing()) { Span s = Trace.currentSpan(); builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder() .setTraceId(s.getTraceId()) .setParentId(s.getSpanId())); } return builder.build(); } public static TraceInfo fromProto(DataTransferTraceInfoProto proto) { if (proto == null) return null; if (!proto.hasTraceId()) return null; return new TraceInfo(proto.getTraceId(), proto.getParentId()); } public static TraceScope continueTraceSpan(ClientOperationHeaderProto header, String description) { return continueTraceSpan(header.getBaseHeader(), description); } public static TraceScope continueTraceSpan(BaseHeaderProto header, String description) { return continueTraceSpan(header.getTraceInfo(), description); } public static TraceScope continueTraceSpan(DataTransferTraceInfoProto proto, String description) { TraceScope scope = null; TraceInfo info = fromProto(proto); if (info != null) { scope = Trace.startSpan(description, info); } return scope; } public static void checkBlockOpStatus( BlockOpResponseProto response, String logInfo) throws IOException { if (response.getStatus() != Status.SUCCESS) { if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) { throw new InvalidBlockTokenException( "Got access token error" + ", status message " + response.getMessage() + ", " + logInfo ); } else { throw new IOException( "Got error" + ", status message " + response.getMessage() + ", " + logInfo ); } } } }
5,714
37.355705
91
java
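The checksum conversion helpers in DataTransferProtoUtil above are easiest to see in a round trip. A minimal sketch, assuming CRC32C with a 512-byte chunk size is an acceptable input:

import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumProtoRoundTrip {
  public static void main(String[] args) {
    // CRC32C with 512 bytes per checksum chunk.
    DataChecksum checksum =
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);

    // Convert to the protobuf form used on the wire, then back again.
    ChecksumProto proto = DataTransferProtoUtil.toProto(checksum);
    DataChecksum decoded = DataTransferProtoUtil.fromProto(proto);

    System.out.println(decoded.getChecksumType() + " / "
        + decoded.getBytesPerChecksum());
  }
}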
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/InvalidEncryptionKeyException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Encryption key verification failed. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class InvalidEncryptionKeyException extends IOException { private static final long serialVersionUID = 0l; public InvalidEncryptionKeyException() { super(); } public InvalidEncryptionKeyException(String msg) { super(msg); } }
1,367
32.365854
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer; import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; /** * Transfer data to/from datanode using a streaming protocol. */ @InterfaceAudience.Private @InterfaceStability.Evolving public interface DataTransferProtocol { public static final Log LOG = LogFactory.getLog(DataTransferProtocol.class); /** Version for data transfers between clients and datanodes * This should change when serialization of DatanodeInfo, not just * when protocol changes. It is not very obvious. */ /* * Version 28: * Declare methods in DataTransferProtocol interface. */ public static final int DATA_TRANSFER_VERSION = 28; /** * Read a block. * * @param blk the block being read. * @param blockToken security token for accessing the block. * @param clientName client's name. * @param blockOffset offset of the block. * @param length maximum number of bytes for this read. * @param sendChecksum if false, the DN should skip reading and sending * checksums * @param cachingStrategy The caching strategy to use. */ public void readBlock(final ExtendedBlock blk, final Token<BlockTokenIdentifier> blockToken, final String clientName, final long blockOffset, final long length, final boolean sendChecksum, final CachingStrategy cachingStrategy) throws IOException; /** * Write a block to a datanode pipeline. * The receiver datanode of this call is the next datanode in the pipeline. * The other downstream datanodes are specified by the targets parameter. * Note that the receiver {@link DatanodeInfo} is not required in the * parameter list since the receiver datanode knows its info. However, the * {@link StorageType} for storing the replica in the receiver datanode is a * parameter since the receiver datanode may support multiple storage types. * * @param blk the block being written. * @param storageType for storing the replica in the receiver datanode. * @param blockToken security token for accessing the block. * @param clientName client's name. * @param targets other downstream datanodes in the pipeline. * @param targetStorageTypes target {@link StorageType}s corresponding * to the target datanodes. 
* @param source source datanode. * @param stage pipeline stage. * @param pipelineSize the size of the pipeline. * @param minBytesRcvd minimum number of bytes received. * @param maxBytesRcvd maximum number of bytes received. * @param latestGenerationStamp the latest generation stamp of the block. * @param pinning whether to pin the block, so Balancer won't move it. * @param targetPinnings whether to pin the block on target datanode */ public void writeBlock(final ExtendedBlock blk, final StorageType storageType, final Token<BlockTokenIdentifier> blockToken, final String clientName, final DatanodeInfo[] targets, final StorageType[] targetStorageTypes, final DatanodeInfo source, final BlockConstructionStage stage, final int pipelineSize, final long minBytesRcvd, final long maxBytesRcvd, final long latestGenerationStamp, final DataChecksum requestedChecksum, final CachingStrategy cachingStrategy, final boolean allowLazyPersist, final boolean pinning, final boolean[] targetPinnings) throws IOException; /** * Transfer a block to another datanode. * The block stage must be * either {@link BlockConstructionStage#TRANSFER_RBW} * or {@link BlockConstructionStage#TRANSFER_FINALIZED}. * * @param blk the block being transferred. * @param blockToken security token for accessing the block. * @param clientName client's name. * @param targets target datanodes. */ public void transferBlock(final ExtendedBlock blk, final Token<BlockTokenIdentifier> blockToken, final String clientName, final DatanodeInfo[] targets, final StorageType[] targetStorageTypes) throws IOException; /** * Request short circuit access file descriptors from a DataNode. * * @param blk The block to get file descriptors for. * @param blockToken Security token for accessing the block. * @param slotId The shared memory slot id to use, or null * to use no slot id. * @param maxVersion Maximum version of the block data the client * can understand. * @param supportsReceiptVerification True if the client supports * receipt verification. */ public void requestShortCircuitFds(final ExtendedBlock blk, final Token<BlockTokenIdentifier> blockToken, SlotId slotId, int maxVersion, boolean supportsReceiptVerification) throws IOException; /** * Release a pair of short-circuit FDs requested earlier. * * @param slotId SlotID used by the earlier file descriptors. */ public void releaseShortCircuitFds(final SlotId slotId) throws IOException; /** * Request a short circuit shared memory area from a DataNode. * * @param clientName The name of the client. */ public void requestShortCircuitShm(String clientName) throws IOException; /** * Receive a block from a source datanode * and then notifies the namenode * to remove the copy from the original datanode. * Note that the source datanode and the original datanode can be different. * It is used for balancing purpose. * * @param blk the block being replaced. * @param storageType the {@link StorageType} for storing the block. * @param blockToken security token for accessing the block. * @param delHint the hint for deleting the block in the original datanode. * @param source the source datanode for receiving the block. */ public void replaceBlock(final ExtendedBlock blk, final StorageType storageType, final Token<BlockTokenIdentifier> blockToken, final String delHint, final DatanodeInfo source) throws IOException; /** * Copy a block. * It is used for balancing purpose. * * @param blk the block being copied. * @param blockToken security token for accessing the block. 
*/ public void copyBlock(final ExtendedBlock blk, final Token<BlockTokenIdentifier> blockToken) throws IOException; /** * Get block checksum (MD5 of CRC32). * * @param blk a block. * @param blockToken security token for accessing the block. * @throws IOException */ public void blockChecksum(final ExtendedBlock blk, final Token<BlockTokenIdentifier> blockToken) throws IOException; }
8,186
39.529703
79
java
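Since DataTransferProtocol above is the client-facing half of the streaming protocol, a hedged sketch of issuing a read request through the Sender implementation (which appears later in this same package) may help. The socket, block and token are assumed to come from a real client, and response handling is deliberately omitted.

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.net.Socket;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.security.token.Token;

public class ReadRequestSketch {
  // Sends an OP_READ_BLOCK request for the first "length" bytes of "block".
  static void requestRead(Socket dnSocket, ExtendedBlock block,
      Token<BlockTokenIdentifier> token, long length) throws Exception {
    DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(dnSocket.getOutputStream()));
    new Sender(out).readBlock(block, token, "example-client",
        0L, length, true /* ask the datanode to send checksums */,
        CachingStrategy.newDefaultStrategy());
    out.flush();
    // A real client would now read a BlockOpResponseProto and the packet
    // stream from dnSocket.getInputStream(); that part is omitted here.
  }
}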
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer; import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.HdfsConfiguration; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_DEFAULT; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import com.google.protobuf.TextFormat; import org.apache.hadoop.hdfs.util.LongBitFormat; /** Pipeline Acknowledgment **/ @InterfaceAudience.Private @InterfaceStability.Evolving public class PipelineAck { PipelineAckProto proto; public final static long UNKOWN_SEQNO = -2; final static int OOB_START = Status.OOB_RESTART_VALUE; // the first OOB type final static int OOB_END = Status.OOB_RESERVED3_VALUE; // the last OOB type final static int NUM_OOB_TYPES = OOB_END - OOB_START + 1; // place holder for timeout value of each OOB type final static long[] OOB_TIMEOUT; public enum ECN { DISABLED(0), SUPPORTED(1), SUPPORTED2(2), CONGESTED(3); private final int value; private static final ECN[] VALUES = values(); static ECN valueOf(int value) { return VALUES[value]; } ECN(int value) { this.value = value; } public int getValue() { return value; } } private enum StatusFormat { STATUS(null, 4), RESERVED(STATUS.BITS, 1), ECN_BITS(RESERVED.BITS, 2); private final LongBitFormat BITS; StatusFormat(LongBitFormat prev, int bits) { BITS = new LongBitFormat(name(), prev, bits, 0); } static Status getStatus(int header) { return Status.valueOf((int) STATUS.BITS.retrieve(header)); } static ECN getECN(int header) { return ECN.valueOf((int) ECN_BITS.BITS.retrieve(header)); } public static int setStatus(int old, Status status) { return (int) STATUS.BITS.combine(status.getNumber(), old); } public static int setECN(int old, ECN ecn) { return (int) ECN_BITS.BITS.combine(ecn.getValue(), old); } } static { OOB_TIMEOUT = new long[NUM_OOB_TYPES]; HdfsConfiguration conf = new HdfsConfiguration(); String[] ele = conf.get(DFS_DATANODE_OOB_TIMEOUT_KEY, DFS_DATANODE_OOB_TIMEOUT_DEFAULT).split(","); for (int i = 0; i < NUM_OOB_TYPES; i++) { OOB_TIMEOUT[i] = (i < ele.length) ? 
Long.parseLong(ele[i]) : 0; } } /** default constructor **/ public PipelineAck() { } /** * Constructor assuming no next DN in pipeline * @param seqno sequence number * @param replies an array of replies */ public PipelineAck(long seqno, int[] replies) { this(seqno, replies, 0L); } /** * Constructor * @param seqno sequence number * @param replies an array of replies * @param downstreamAckTimeNanos ack RTT in nanoseconds, 0 if no next DN in pipeline */ public PipelineAck(long seqno, int[] replies, long downstreamAckTimeNanos) { ArrayList<Status> statusList = Lists.newArrayList(); ArrayList<Integer> flagList = Lists.newArrayList(); for (int r : replies) { statusList.add(StatusFormat.getStatus(r)); flagList.add(r); } proto = PipelineAckProto.newBuilder() .setSeqno(seqno) .addAllReply(statusList) .addAllFlag(flagList) .setDownstreamAckTimeNanos(downstreamAckTimeNanos) .build(); } /** * Get the sequence number * @return the sequence number */ public long getSeqno() { return proto.getSeqno(); } /** * Get the number of replies * @return the number of replies */ public short getNumOfReplies() { return (short)proto.getReplyCount(); } /** * get the header flag of ith reply */ public int getHeaderFlag(int i) { if (proto.getFlagCount() > 0) { return proto.getFlag(i); } else { return combineHeader(ECN.DISABLED, proto.getReply(i)); } } public int getFlag(int i) { return proto.getFlag(i); } /** * Get the time elapsed for downstream ack RTT in nanoseconds * @return time elapsed for downstream ack in nanoseconds, 0 if no next DN in pipeline */ public long getDownstreamAckTimeNanos() { return proto.getDownstreamAckTimeNanos(); } /** * Check if this ack contains error status * @return true if all statuses are SUCCESS */ public boolean isSuccess() { for (Status s : proto.getReplyList()) { if (s != Status.SUCCESS) { return false; } } return true; } /** * Returns the OOB status if this ack contains one. * @return null if it is not an OOB ack. */ public Status getOOBStatus() { // Normal data transfer acks will have a valid sequence number, so // this will return right away in most cases. if (getSeqno() != UNKOWN_SEQNO) { return null; } for (Status s : proto.getReplyList()) { // The following check is valid because protobuf guarantees to // preserve the ordering of enum elements. if (s.getNumber() >= OOB_START && s.getNumber() <= OOB_END) { return s; } } return null; } /** * Get the timeout to be used for transmitting the OOB type * @return the timeout in milliseconds */ public static long getOOBTimeout(Status status) throws IOException { int index = status.getNumber() - OOB_START; if (index >= 0 && index < NUM_OOB_TYPES) { return OOB_TIMEOUT[index]; } // Not an OOB. 
throw new IOException("Not an OOB status: " + status); } /** Get the Restart OOB ack status */ public static Status getRestartOOBStatus() { return Status.OOB_RESTART; } /** return true if it is the restart OOB status code */ public static boolean isRestartOOBStatus(Status st) { return st.equals(Status.OOB_RESTART); } /**** Writable interface ****/ public void readFields(InputStream in) throws IOException { proto = PipelineAckProto.parseFrom(vintPrefixed(in)); } public void write(OutputStream out) throws IOException { proto.writeDelimitedTo(out); } @Override //Object public String toString() { return TextFormat.shortDebugString(proto); } public static Status getStatusFromHeader(int header) { return StatusFormat.getStatus(header); } public static ECN getECNFromHeader(int header) { return StatusFormat.getECN(header); } public static int setStatusForHeader(int old, Status status) { return StatusFormat.setStatus(old, status); } public static int combineHeader(ECN ecn, Status status) { int header = 0; header = StatusFormat.setStatus(header, status); header = StatusFormat.setECN(header, ecn); return header; } }
7,963
27.96
88
java
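PipelineAck above packs a Status plus ECN bits into each per-datanode reply header. The round-trip sketch below uses only the public constructor, combineHeader, write and readFields shown above; the sequence number 42 is arbitrary.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

public class PipelineAckRoundTrip {
  public static void main(String[] args) throws Exception {
    // One reply per datanode in the pipeline; each reply packs a Status
    // plus ECN bits into a single header int.
    int[] replies = new int[] {
        PipelineAck.combineHeader(PipelineAck.ECN.SUPPORTED, Status.SUCCESS),
        PipelineAck.combineHeader(PipelineAck.ECN.DISABLED, Status.SUCCESS) };
    PipelineAck ack = new PipelineAck(42L, replies, 0L);

    // Serialize and parse it back, as the upstream datanode or client would.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    ack.write(bytes);
    PipelineAck parsed = new PipelineAck();
    parsed.readFields(new ByteArrayInputStream(bytes.toByteArray()));

    System.out.println("seqno=" + parsed.getSeqno()
        + " success=" + parsed.isSuccess());
  }
}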
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer; import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.fromProto; import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.continueTraceSpan; import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed; import java.io.DataInputStream; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId; import org.apache.htrace.TraceScope; /** Receiver */ @InterfaceAudience.Private @InterfaceStability.Evolving public abstract class Receiver implements DataTransferProtocol { protected DataInputStream in; /** Initialize a receiver for DataTransferProtocol with a socket. */ protected void initialize(final DataInputStream in) { this.in = in; } /** Read an Op. It also checks protocol version. */ protected final Op readOp() throws IOException { final short version = in.readShort(); if (version != DataTransferProtocol.DATA_TRANSFER_VERSION) { throw new IOException( "Version Mismatch (Expected: " + DataTransferProtocol.DATA_TRANSFER_VERSION + ", Received: " + version + " )"); } return Op.read(in); } /** Process op by the corresponding method. 
*/ protected final void processOp(Op op) throws IOException { switch(op) { case READ_BLOCK: opReadBlock(); break; case WRITE_BLOCK: opWriteBlock(in); break; case REPLACE_BLOCK: opReplaceBlock(in); break; case COPY_BLOCK: opCopyBlock(in); break; case BLOCK_CHECKSUM: opBlockChecksum(in); break; case TRANSFER_BLOCK: opTransferBlock(in); break; case REQUEST_SHORT_CIRCUIT_FDS: opRequestShortCircuitFds(in); break; case RELEASE_SHORT_CIRCUIT_FDS: opReleaseShortCircuitFds(in); break; case REQUEST_SHORT_CIRCUIT_SHM: opRequestShortCircuitShm(in); break; default: throw new IOException("Unknown op " + op + " in data stream"); } } static private CachingStrategy getCachingStrategy(CachingStrategyProto strategy) { Boolean dropBehind = strategy.hasDropBehind() ? strategy.getDropBehind() : null; Long readahead = strategy.hasReadahead() ? strategy.getReadahead() : null; return new CachingStrategy(dropBehind, readahead); } /** Receive OP_READ_BLOCK */ private void opReadBlock() throws IOException { OpReadBlockProto proto = OpReadBlockProto.parseFrom(vintPrefixed(in)); TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { readBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()), PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), proto.getOffset(), proto.getLen(), proto.getSendChecksums(), (proto.hasCachingStrategy() ? getCachingStrategy(proto.getCachingStrategy()) : CachingStrategy.newDefaultStrategy())); } finally { if (traceScope != null) traceScope.close(); } } /** Receive OP_WRITE_BLOCK */ private void opWriteBlock(DataInputStream in) throws IOException { final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in)); final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList()); TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()), PBHelper.convertStorageType(proto.getStorageType()), PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), targets, PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length), PBHelper.convert(proto.getSource()), fromProto(proto.getStage()), proto.getPipelineSize(), proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(), proto.getLatestGenerationStamp(), fromProto(proto.getRequestedChecksum()), (proto.hasCachingStrategy() ? getCachingStrategy(proto.getCachingStrategy()) : CachingStrategy.newDefaultStrategy()), (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false), (proto.hasPinning() ? 
proto.getPinning(): false), (PBHelper.convertBooleanList(proto.getTargetPinningsList()))); } finally { if (traceScope != null) traceScope.close(); } } /** Receive {@link Op#TRANSFER_BLOCK} */ private void opTransferBlock(DataInputStream in) throws IOException { final OpTransferBlockProto proto = OpTransferBlockProto.parseFrom(vintPrefixed(in)); final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList()); TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()), PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), targets, PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length)); } finally { if (traceScope != null) traceScope.close(); } } /** Receive {@link Op#REQUEST_SHORT_CIRCUIT_FDS} */ private void opRequestShortCircuitFds(DataInputStream in) throws IOException { final OpRequestShortCircuitAccessProto proto = OpRequestShortCircuitAccessProto.parseFrom(vintPrefixed(in)); SlotId slotId = (proto.hasSlotId()) ? PBHelper.convert(proto.getSlotId()) : null; TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { requestShortCircuitFds(PBHelper.convert(proto.getHeader().getBlock()), PBHelper.convert(proto.getHeader().getToken()), slotId, proto.getMaxVersion(), proto.getSupportsReceiptVerification()); } finally { if (traceScope != null) traceScope.close(); } } /** Receive {@link Op#RELEASE_SHORT_CIRCUIT_FDS} */ private void opReleaseShortCircuitFds(DataInputStream in) throws IOException { final ReleaseShortCircuitAccessRequestProto proto = ReleaseShortCircuitAccessRequestProto.parseFrom(vintPrefixed(in)); TraceScope traceScope = continueTraceSpan(proto.getTraceInfo(), proto.getClass().getSimpleName()); try { releaseShortCircuitFds(PBHelper.convert(proto.getSlotId())); } finally { if (traceScope != null) traceScope.close(); } } /** Receive {@link Op#REQUEST_SHORT_CIRCUIT_SHM} */ private void opRequestShortCircuitShm(DataInputStream in) throws IOException { final ShortCircuitShmRequestProto proto = ShortCircuitShmRequestProto.parseFrom(vintPrefixed(in)); TraceScope traceScope = continueTraceSpan(proto.getTraceInfo(), proto.getClass().getSimpleName()); try { requestShortCircuitShm(proto.getClientName()); } finally { if (traceScope != null) traceScope.close(); } } /** Receive OP_REPLACE_BLOCK */ private void opReplaceBlock(DataInputStream in) throws IOException { OpReplaceBlockProto proto = OpReplaceBlockProto.parseFrom(vintPrefixed(in)); TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { replaceBlock(PBHelper.convert(proto.getHeader().getBlock()), PBHelper.convertStorageType(proto.getStorageType()), PBHelper.convert(proto.getHeader().getToken()), proto.getDelHint(), PBHelper.convert(proto.getSource())); } finally { if (traceScope != null) traceScope.close(); } } /** Receive OP_COPY_BLOCK */ private void opCopyBlock(DataInputStream in) throws IOException { OpCopyBlockProto proto = OpCopyBlockProto.parseFrom(vintPrefixed(in)); TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { copyBlock(PBHelper.convert(proto.getHeader().getBlock()), PBHelper.convert(proto.getHeader().getToken())); } finally { if (traceScope != null) traceScope.close(); } } /** Receive OP_BLOCK_CHECKSUM */ private void opBlockChecksum(DataInputStream in) throws IOException { 
OpBlockChecksumProto proto = OpBlockChecksumProto.parseFrom(vintPrefixed(in)); TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName()); try { blockChecksum(PBHelper.convert(proto.getHeader().getBlock()), PBHelper.convert(proto.getHeader().getToken())); } finally { if (traceScope != null) traceScope.close(); } } }
10,858
39.977358
102
java
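Receiver.readOp() above expects a two-byte protocol version followed by a one-byte opcode before any operation-specific protobuf. A small sketch of that framing, using only Op and DATA_TRANSFER_VERSION from the files above:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;

public class OpFramingDemo {
  public static void main(String[] args) throws Exception {
    // Frame a request the way Receiver.readOp() expects it:
    // a 2-byte protocol version followed by the 1-byte opcode.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
    Op.READ_BLOCK.write(out);

    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray()));
    short version = in.readShort();   // checked against the expected version
    Op op = Op.read(in);              // then dispatched by processOp()
    System.out.println("version=" + version + " op=" + op);
  }
}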
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/WhitelistBasedTrustedChannelResolver.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer; import java.net.InetAddress; import java.net.UnknownHostException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver; import org.apache.hadoop.util.CombinedIPWhiteList; public class WhitelistBasedTrustedChannelResolver extends TrustedChannelResolver { private CombinedIPWhiteList whiteListForServer; private CombinedIPWhiteList whitelistForClient; private static final String FIXEDWHITELIST_DEFAULT_LOCATION = "/etc/hadoop/fixedwhitelist"; private static final String VARIABLEWHITELIST_DEFAULT_LOCATION = "/etc/hadoop/whitelist"; /** * Path to the file to containing subnets and ip addresses to form fixed whitelist. */ public static final String DFS_DATATRANSFER_SERVER_FIXEDWHITELIST_FILE = "dfs.datatransfer.server.fixedwhitelist.file"; /** * Enables/Disables variable whitelist */ public static final String DFS_DATATRANSFER_SERVER_VARIABLEWHITELIST_ENABLE = "dfs.datatransfer.server.variablewhitelist.enable"; /** * Path to the file to containing subnets and ip addresses to form variable whitelist. */ public static final String DFS_DATATRANSFER_SERVER_VARIABLEWHITELIST_FILE = "dfs.datatransfer.server.variablewhitelist.file"; /** * time in seconds by which the variable whitelist file is checked for updates */ public static final String DFS_DATATRANSFER_SERVER_VARIABLEWHITELIST_CACHE_SECS = "dfs.datatransfer.server.variablewhitelist.cache.secs"; /** * Path to the file to containing subnets and ip addresses to form fixed whitelist. */ public static final String DFS_DATATRANSFER_CLIENT_FIXEDWHITELIST_FILE = "dfs.datatransfer.client.fixedwhitelist.file"; /** * Enables/Disables variable whitelist */ public static final String DFS_DATATRANSFER_CLIENT_VARIABLEWHITELIST_ENABLE = "dfs.datatransfer.client.variablewhitelist.enable"; /** * Path to the file to containing subnets and ip addresses to form variable whitelist. 
*/ public static final String DFS_DATATRANSFER_CLIENT_VARIABLEWHITELIST_FILE = "dfs.datatransfer.client.variablewhitelist.file"; /** * time in seconds by which the variable whitelist file is checked for updates */ public static final String DFS_DATATRANSFER_CLIENT_VARIABLEWHITELIST_CACHE_SECS = "dfs.datatransfer.client.variablewhitelist.cache.secs"; @Override public void setConf(Configuration conf) { super.setConf(conf); String fixedFile = conf.get(DFS_DATATRANSFER_SERVER_FIXEDWHITELIST_FILE, FIXEDWHITELIST_DEFAULT_LOCATION); String variableFile = null; long expiryTime = 0; if (conf.getBoolean(DFS_DATATRANSFER_SERVER_VARIABLEWHITELIST_ENABLE, false)) { variableFile = conf.get(DFS_DATATRANSFER_SERVER_VARIABLEWHITELIST_FILE, VARIABLEWHITELIST_DEFAULT_LOCATION); expiryTime = conf.getLong(DFS_DATATRANSFER_SERVER_VARIABLEWHITELIST_CACHE_SECS,3600) * 1000; } whiteListForServer = new CombinedIPWhiteList(fixedFile,variableFile,expiryTime); fixedFile = conf.get(DFS_DATATRANSFER_CLIENT_FIXEDWHITELIST_FILE, fixedFile); expiryTime = 0; if (conf.getBoolean(DFS_DATATRANSFER_CLIENT_VARIABLEWHITELIST_ENABLE, false)) { variableFile = conf.get(DFS_DATATRANSFER_CLIENT_VARIABLEWHITELIST_FILE,variableFile); expiryTime = conf.getLong(DFS_DATATRANSFER_CLIENT_VARIABLEWHITELIST_CACHE_SECS,3600) * 1000; } whitelistForClient = new CombinedIPWhiteList(fixedFile,variableFile,expiryTime); } public boolean isTrusted() { try { return whitelistForClient.isIn(InetAddress.getLocalHost().getHostAddress()); } catch (UnknownHostException e) { return false; } } public boolean isTrusted(InetAddress clientAddress) { return whiteListForServer.isIn(clientAddress.getHostAddress()); } }
4,720
38.341667
93
java
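WhitelistBasedTrustedChannelResolver above is driven entirely by configuration keys. A hedged setup sketch follows; the whitelist path is invented for illustration, and the referenced files are expected to exist when setConf() loads them.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.WhitelistBasedTrustedChannelResolver;

public class WhitelistResolverSetup {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setClass(DFSConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS,
        WhitelistBasedTrustedChannelResolver.class,
        TrustedChannelResolver.class);

    // Point the server-side fixed whitelist at a custom file (hypothetical
    // path) and enable the refreshable list with a 10-minute cache.
    conf.set(WhitelistBasedTrustedChannelResolver
        .DFS_DATATRANSFER_SERVER_FIXEDWHITELIST_FILE,
        "/etc/hadoop/dn-fixedwhitelist");
    conf.setBoolean(WhitelistBasedTrustedChannelResolver
        .DFS_DATATRANSFER_SERVER_VARIABLEWHITELIST_ENABLE, true);
    conf.setLong(WhitelistBasedTrustedChannelResolver
        .DFS_DATATRANSFER_SERVER_VARIABLEWHITELIST_CACHE_SECS, 600);

    // getInstance() calls setConf(), which loads the whitelist files, so
    // they should exist and be readable for this to succeed.
    TrustedChannelResolver resolver = TrustedChannelResolver.getInstance(conf);
    System.out.println(resolver instanceof WhitelistBasedTrustedChannelResolver);
  }
}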
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/ReplaceDatanodeOnFailure.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; /** * The setting of replace-datanode-on-failure feature. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class ReplaceDatanodeOnFailure { /** The replacement policies */ public enum Policy { /** The feature is disabled in the entire site. */ DISABLE(Condition.FALSE), /** Never add a new datanode. */ NEVER(Condition.FALSE), /** @see ReplaceDatanodeOnFailure.Condition#DEFAULT */ DEFAULT(Condition.DEFAULT), /** Always add a new datanode when an existing datanode is removed. */ ALWAYS(Condition.TRUE); private final Condition condition; private Policy(Condition condition) { this.condition = condition; } Condition getCondition() { return condition; } } /** Datanode replacement condition */ private static interface Condition { /** Return true unconditionally. */ static final Condition TRUE = new Condition() { @Override public boolean satisfy(short replication, DatanodeInfo[] existings, int nExistings, boolean isAppend, boolean isHflushed) { return true; } }; /** Return false unconditionally. */ static final Condition FALSE = new Condition() { @Override public boolean satisfy(short replication, DatanodeInfo[] existings, int nExistings, boolean isAppend, boolean isHflushed) { return false; } }; /** * DEFAULT condition: * Let r be the replication number. * Let n be the number of existing datanodes. * Add a new datanode only if r >= 3 and either * (1) floor(r/2) >= n; or * (2) r > n and the block is hflushed/appended. */ static final Condition DEFAULT = new Condition() { @Override public boolean satisfy(final short replication, final DatanodeInfo[] existings, final int n, final boolean isAppend, final boolean isHflushed) { if (replication < 3) { return false; } else { if (n <= (replication/2)) { return true; } else { return isAppend || isHflushed; } } } }; /** Is the condition satisfied? */ public boolean satisfy(short replication, DatanodeInfo[] existings, int nExistings, boolean isAppend, boolean isHflushed); } private final Policy policy; private final boolean bestEffort; public ReplaceDatanodeOnFailure(Policy policy, boolean bestEffort) { this.policy = policy; this.bestEffort = bestEffort; } /** Check if the feature is enabled. */ public void checkEnabled() { if (policy == Policy.DISABLE) { throw new UnsupportedOperationException( "This feature is disabled. 
Please refer to " + HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY + " configuration property."); } } /** * Best effort means that the client will try to replace the failed datanode * (provided that the policy is satisfied), however, it will continue the * write operation in case that the datanode replacement also fails. * * @return Suppose the datanode replacement fails. * false: An exception should be thrown so that the write will fail. * true : The write should be resumed with the remaining datandoes. */ public boolean isBestEffort() { return bestEffort; } /** Does it need a replacement according to the policy? */ public boolean satisfy( final short replication, final DatanodeInfo[] existings, final boolean isAppend, final boolean isHflushed) { final int n = existings == null? 0: existings.length; if (n == 0 || n >= replication) { //don't need to add datanode for any policy. return false; } else { return policy.getCondition().satisfy( replication, existings, n, isAppend, isHflushed); } } @Override public String toString() { return policy.toString(); } /** Get the setting from configuration. */ public static ReplaceDatanodeOnFailure get(final Configuration conf) { final Policy policy = getPolicy(conf); final boolean bestEffort = conf.getBoolean( HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.BEST_EFFORT_KEY, HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.BEST_EFFORT_DEFAULT); return new ReplaceDatanodeOnFailure(policy, bestEffort); } private static Policy getPolicy(final Configuration conf) { final boolean enabled = conf.getBoolean( HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY, HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_DEFAULT); if (!enabled) { return Policy.DISABLE; } final String policy = conf.get( HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.POLICY_KEY, HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.POLICY_DEFAULT); for(int i = 1; i < Policy.values().length; i++) { final Policy p = Policy.values()[i]; if (p.name().equalsIgnoreCase(policy)) { return p; } } throw new HadoopIllegalArgumentException("Illegal configuration value for " + HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.POLICY_KEY + ": " + policy); } /** Write the setting to configuration. */ public static void write(final Policy policy, final boolean bestEffort, final Configuration conf) { conf.setBoolean( HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY, policy != Policy.DISABLE); conf.set( HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.POLICY_KEY, policy.name()); conf.setBoolean( HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.BEST_EFFORT_KEY, bestEffort); } }
7,036
34.185
86
java
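ReplaceDatanodeOnFailure above encodes the replacement policy described in its DEFAULT condition. The sketch below persists and reloads a policy and then evaluates it; since the DEFAULT condition only inspects the number of existing datanodes, placeholder array entries are enough for this illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;

public class ReplacePolicyDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Persist the DEFAULT policy with best-effort enabled, then read it back.
    ReplaceDatanodeOnFailure.write(
        ReplaceDatanodeOnFailure.Policy.DEFAULT, true, conf);
    ReplaceDatanodeOnFailure rdof = ReplaceDatanodeOnFailure.get(conf);

    // With replication 3 and only one surviving datanode in the pipeline,
    // the DEFAULT condition (floor(r/2) >= n) asks for a replacement.
    // Only the array length matters to DEFAULT, so null entries suffice here.
    DatanodeInfo[] existings = new DatanodeInfo[1];
    boolean needReplacement = rdof.satisfy((short) 3, existings,
        false /* isAppend */, false /* isHflushed */);
    System.out.println("bestEffort=" + rdof.isBestEffort()
        + " needReplacement=" + needReplacement);
  }
}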
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** Operation */ @InterfaceAudience.Private @InterfaceStability.Evolving public enum Op { WRITE_BLOCK((byte)80), READ_BLOCK((byte)81), READ_METADATA((byte)82), REPLACE_BLOCK((byte)83), COPY_BLOCK((byte)84), BLOCK_CHECKSUM((byte)85), TRANSFER_BLOCK((byte)86), REQUEST_SHORT_CIRCUIT_FDS((byte)87), RELEASE_SHORT_CIRCUIT_FDS((byte)88), REQUEST_SHORT_CIRCUIT_SHM((byte)89); /** The code for this operation. */ public final byte code; private Op(byte code) { this.code = code; } private static final int FIRST_CODE = values()[0].code; /** Return the object represented by the code. */ private static Op valueOf(byte code) { final int i = (code & 0xff) - FIRST_CODE; return i < 0 || i >= values().length? null: values()[i]; } /** Read from in */ public static Op read(DataInput in) throws IOException { return valueOf(in.readByte()); } /** Write to out */ public void write(DataOutput out) throws IOException { out.write(code); } }
2,067
30.333333
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto; import org.apache.hadoop.hdfs.util.ByteBufferOutputStream; import com.google.common.base.Preconditions; import com.google.common.primitives.Shorts; import com.google.common.primitives.Ints; import com.google.protobuf.InvalidProtocolBufferException; /** * Header data for each packet that goes through the read/write pipelines. * Includes all of the information about the packet, excluding checksums and * actual data. * * This data includes: * - the offset in bytes into the HDFS block of the data in this packet * - the sequence number of this packet in the pipeline * - whether or not this is the last packet in the pipeline * - the length of the data in this packet * - whether or not this packet should be synced by the DNs. * * When serialized, this header is written out as a protocol buffer, preceded * by a 4-byte integer representing the full packet length, and a 2-byte short * representing the header length. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class PacketHeader { private static final int MAX_PROTO_SIZE = PacketHeaderProto.newBuilder() .setOffsetInBlock(0) .setSeqno(0) .setLastPacketInBlock(false) .setDataLen(0) .setSyncBlock(false) .build().getSerializedSize(); public static final int PKT_LENGTHS_LEN = Ints.BYTES + Shorts.BYTES; public static final int PKT_MAX_HEADER_LEN = PKT_LENGTHS_LEN + MAX_PROTO_SIZE; private int packetLen; private PacketHeaderProto proto; public PacketHeader() { } public PacketHeader(int packetLen, long offsetInBlock, long seqno, boolean lastPacketInBlock, int dataLen, boolean syncBlock) { this.packetLen = packetLen; Preconditions.checkArgument(packetLen >= Ints.BYTES, "packet len %s should always be at least 4 bytes", packetLen); PacketHeaderProto.Builder builder = PacketHeaderProto.newBuilder() .setOffsetInBlock(offsetInBlock) .setSeqno(seqno) .setLastPacketInBlock(lastPacketInBlock) .setDataLen(dataLen); if (syncBlock) { // Only set syncBlock if it is specified. // This is wire-incompatible with Hadoop 2.0.0-alpha due to HDFS-3721 // because it changes the length of the packet header, and BlockReceiver // in that version did not support variable-length headers. 
builder.setSyncBlock(syncBlock); } proto = builder.build(); } public int getDataLen() { return proto.getDataLen(); } public boolean isLastPacketInBlock() { return proto.getLastPacketInBlock(); } public long getSeqno() { return proto.getSeqno(); } public long getOffsetInBlock() { return proto.getOffsetInBlock(); } public int getPacketLen() { return packetLen; } public boolean getSyncBlock() { return proto.getSyncBlock(); } @Override public String toString() { return "PacketHeader with packetLen=" + packetLen + " header data: " + proto.toString(); } public void setFieldsFromData( int packetLen, byte[] headerData) throws InvalidProtocolBufferException { this.packetLen = packetLen; proto = PacketHeaderProto.parseFrom(headerData); } public void readFields(ByteBuffer buf) throws IOException { packetLen = buf.getInt(); short protoLen = buf.getShort(); byte[] data = new byte[protoLen]; buf.get(data); proto = PacketHeaderProto.parseFrom(data); } public void readFields(DataInputStream in) throws IOException { this.packetLen = in.readInt(); short protoLen = in.readShort(); byte[] data = new byte[protoLen]; in.readFully(data); proto = PacketHeaderProto.parseFrom(data); } /** * @return the number of bytes necessary to write out this header, * including the length-prefixing of the payload and header */ public int getSerializedSize() { return PKT_LENGTHS_LEN + proto.getSerializedSize(); } /** * Write the header into the buffer. * This requires that PKT_HEADER_LEN bytes are available. */ public void putInBuffer(final ByteBuffer buf) { assert proto.getSerializedSize() <= MAX_PROTO_SIZE : "Expected " + (MAX_PROTO_SIZE) + " got: " + proto.getSerializedSize(); try { buf.putInt(packetLen); buf.putShort((short) proto.getSerializedSize()); proto.writeTo(new ByteBufferOutputStream(buf)); } catch (IOException e) { throw new RuntimeException(e); } } public void write(DataOutputStream out) throws IOException { assert proto.getSerializedSize() <= MAX_PROTO_SIZE : "Expected " + (MAX_PROTO_SIZE) + " got: " + proto.getSerializedSize(); out.writeInt(packetLen); out.writeShort(proto.getSerializedSize()); proto.writeTo(out); } public byte[] getBytes() { ByteBuffer buf = ByteBuffer.allocate(getSerializedSize()); putInBuffer(buf); return buf.array(); } /** * Perform a sanity check on the packet, returning true if it is sane. * @param lastSeqNo the previous sequence number received - we expect the current * sequence number to be larger by 1. */ public boolean sanityCheck(long lastSeqNo) { // We should only have a non-positive data length for the last packet if (proto.getDataLen() <= 0 && !proto.getLastPacketInBlock()) return false; // The last packet should not contain data if (proto.getLastPacketInBlock() && proto.getDataLen() != 0) return false; // Seqnos should always increase by 1 with each packet received if (proto.getSeqno() != lastSeqNo + 1) return false; return true; } @Override public boolean equals(Object o) { if (!(o instanceof PacketHeader)) return false; PacketHeader other = (PacketHeader)o; return this.proto.equals(other.proto); } @Override public int hashCode() { return (int)proto.getSeqno(); } }
7,063
31.855814
83
java
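The PacketHeader javadoc above spells out the wire layout: a 4-byte packet length and a 2-byte header length, followed by the protobuf-encoded header fields. Below is a minimal round-trip sketch of that layout (not part of Hadoop) using only the class's own putInBuffer/readFields methods; all field values are arbitrary illustration values, not anything prescribed by HDFS.

// Illustrative round-trip of the PacketHeader wire layout described above.
// All field values are arbitrary; assumes the PacketHeader class from this
// file is on the classpath.
import java.nio.ByteBuffer;

import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;

public class PacketHeaderRoundTrip {
  public static void main(String[] args) throws Exception {
    int dataLen = 512;
    int checksumLen = 8;
    // Per the class javadoc, packetLen counts its own 4 bytes plus the
    // checksums and data, but not the header.
    int packetLen = 4 + checksumLen + dataLen;

    PacketHeader header = new PacketHeader(packetLen, 0L /*offsetInBlock*/,
        1L /*seqno*/, false /*lastPacketInBlock*/, dataLen, false /*syncBlock*/);

    // Serialize: PLEN (int), HLEN (short), then the protobuf header bytes.
    ByteBuffer buf = ByteBuffer.allocate(header.getSerializedSize());
    header.putInBuffer(buf);
    buf.flip();

    // Parse it back into a fresh header and compare.
    PacketHeader parsed = new PacketHeader();
    parsed.readFields(buf);
    System.out.println(parsed.equals(header));   // expected: true
  }
}

Since equals() compares only the protobuf fields, the round trip succeeds even though packetLen itself travels outside the proto.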
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocol.datatransfer;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/** Block Construction Stage */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public enum BlockConstructionStage {
  /** The enum constants are always listed as a regular stage followed by its
   * recovery stage.
   * Changing this order will break getRecoveryStage. */
  // pipeline set up for block append
  PIPELINE_SETUP_APPEND,
  // pipeline set up for failed PIPELINE_SETUP_APPEND recovery
  PIPELINE_SETUP_APPEND_RECOVERY,
  // data streaming
  DATA_STREAMING,
  // pipeline setup for failed data streaming recovery
  PIPELINE_SETUP_STREAMING_RECOVERY,
  // close the block and pipeline
  PIPELINE_CLOSE,
  // Recover a failed PIPELINE_CLOSE
  PIPELINE_CLOSE_RECOVERY,
  // pipeline set up for block creation
  PIPELINE_SETUP_CREATE,
  // transfer RBW for adding datanodes
  TRANSFER_RBW,
  // transfer Finalized for adding datanodes
  TRANSFER_FINALIZED;

  final static private byte RECOVERY_BIT = (byte)1;

  /**
   * get the recovery stage of this stage
   */
  public BlockConstructionStage getRecoveryStage() {
    if (this == PIPELINE_SETUP_CREATE) {
      throw new IllegalArgumentException( "Unexpected blockStage " + this);
    } else {
      return values()[ordinal()|RECOVERY_BIT];
    }
  }
}
2,223
34.301587
75
java
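getRecoveryStage() depends entirely on declaration order: every stage that has a recovery variant is immediately followed by it, so OR-ing the ordinal with RECOVERY_BIT selects the partner constant. The following is a small illustrative sketch (not part of Hadoop) that exercises that mapping; PIPELINE_SETUP_CREATE is rejected explicitly, and the transfer stages have no recovery partners.

// Illustrative sketch: exercises the ordinal | RECOVERY_BIT mapping in
// BlockConstructionStage.getRecoveryStage().
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;

public class RecoveryStageDemo {
  public static void main(String[] args) {
    // DATA_STREAMING has ordinal 2; 2 | 1 == 3, the ordinal of
    // PIPELINE_SETUP_STREAMING_RECOVERY declared right after it.
    System.out.println(BlockConstructionStage.DATA_STREAMING.getRecoveryStage());

    // PIPELINE_CLOSE (ordinal 4) maps to PIPELINE_CLOSE_RECOVERY (ordinal 5).
    System.out.println(BlockConstructionStage.PIPELINE_CLOSE.getRecoveryStage());

    // PIPELINE_SETUP_CREATE has no recovery partner and is rejected explicitly.
    try {
      BlockConstructionStage.PIPELINE_SETUP_CREATE.getRecoveryStage();
    } catch (IllegalArgumentException expected) {
      System.out.println("no recovery stage for PIPELINE_SETUP_CREATE");
    }
  }
}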
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocol.datatransfer;

import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.toProto;

import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpRequestShortCircuitAccessProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.htrace.Trace;
import org.apache.htrace.Span;

import com.google.protobuf.Message;

/** Sender */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class Sender implements DataTransferProtocol {
  private final DataOutputStream out;

  /** Create a sender for DataTransferProtocol with an output stream. */
  public Sender(final DataOutputStream out) {
    this.out = out;
  }

  /** Initialize an operation. 
*/ private static void op(final DataOutput out, final Op op ) throws IOException { out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); op.write(out); } private static void send(final DataOutputStream out, final Op opcode, final Message proto) throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("Sending DataTransferOp " + proto.getClass().getSimpleName() + ": " + proto); } op(out, opcode); proto.writeDelimitedTo(out); out.flush(); } static private CachingStrategyProto getCachingStrategy(CachingStrategy cachingStrategy) { CachingStrategyProto.Builder builder = CachingStrategyProto.newBuilder(); if (cachingStrategy.getReadahead() != null) { builder.setReadahead(cachingStrategy.getReadahead().longValue()); } if (cachingStrategy.getDropBehind() != null) { builder.setDropBehind(cachingStrategy.getDropBehind().booleanValue()); } return builder.build(); } @Override public void readBlock(final ExtendedBlock blk, final Token<BlockTokenIdentifier> blockToken, final String clientName, final long blockOffset, final long length, final boolean sendChecksum, final CachingStrategy cachingStrategy) throws IOException { OpReadBlockProto proto = OpReadBlockProto.newBuilder() .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken)) .setOffset(blockOffset) .setLen(length) .setSendChecksums(sendChecksum) .setCachingStrategy(getCachingStrategy(cachingStrategy)) .build(); send(out, Op.READ_BLOCK, proto); } @Override public void writeBlock(final ExtendedBlock blk, final StorageType storageType, final Token<BlockTokenIdentifier> blockToken, final String clientName, final DatanodeInfo[] targets, final StorageType[] targetStorageTypes, final DatanodeInfo source, final BlockConstructionStage stage, final int pipelineSize, final long minBytesRcvd, final long maxBytesRcvd, final long latestGenerationStamp, DataChecksum requestedChecksum, final CachingStrategy cachingStrategy, final boolean allowLazyPersist, final boolean pinning, final boolean[] targetPinnings) throws IOException { ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader( blk, clientName, blockToken); ChecksumProto checksumProto = DataTransferProtoUtil.toProto(requestedChecksum); OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder() .setHeader(header) .setStorageType(PBHelper.convertStorageType(storageType)) .addAllTargets(PBHelper.convert(targets, 1)) .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1)) .setStage(toProto(stage)) .setPipelineSize(pipelineSize) .setMinBytesRcvd(minBytesRcvd) .setMaxBytesRcvd(maxBytesRcvd) .setLatestGenerationStamp(latestGenerationStamp) .setRequestedChecksum(checksumProto) .setCachingStrategy(getCachingStrategy(cachingStrategy)) .setAllowLazyPersist(allowLazyPersist) .setPinning(pinning) .addAllTargetPinnings(PBHelper.convert(targetPinnings, 1)); if (source != null) { proto.setSource(PBHelper.convertDatanodeInfo(source)); } send(out, Op.WRITE_BLOCK, proto.build()); } @Override public void transferBlock(final ExtendedBlock blk, final Token<BlockTokenIdentifier> blockToken, final String clientName, final DatanodeInfo[] targets, final StorageType[] targetStorageTypes) throws IOException { OpTransferBlockProto proto = OpTransferBlockProto.newBuilder() .setHeader(DataTransferProtoUtil.buildClientHeader( blk, clientName, blockToken)) .addAllTargets(PBHelper.convert(targets)) .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes)) .build(); send(out, Op.TRANSFER_BLOCK, proto); } @Override public void 
requestShortCircuitFds(final ExtendedBlock blk, final Token<BlockTokenIdentifier> blockToken, SlotId slotId, int maxVersion, boolean supportsReceiptVerification) throws IOException { OpRequestShortCircuitAccessProto.Builder builder = OpRequestShortCircuitAccessProto.newBuilder() .setHeader(DataTransferProtoUtil.buildBaseHeader( blk, blockToken)).setMaxVersion(maxVersion); if (slotId != null) { builder.setSlotId(PBHelper.convert(slotId)); } builder.setSupportsReceiptVerification(supportsReceiptVerification); OpRequestShortCircuitAccessProto proto = builder.build(); send(out, Op.REQUEST_SHORT_CIRCUIT_FDS, proto); } @Override public void releaseShortCircuitFds(SlotId slotId) throws IOException { ReleaseShortCircuitAccessRequestProto.Builder builder = ReleaseShortCircuitAccessRequestProto.newBuilder(). setSlotId(PBHelper.convert(slotId)); if (Trace.isTracing()) { Span s = Trace.currentSpan(); builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder() .setTraceId(s.getTraceId()).setParentId(s.getSpanId())); } ReleaseShortCircuitAccessRequestProto proto = builder.build(); send(out, Op.RELEASE_SHORT_CIRCUIT_FDS, proto); } @Override public void requestShortCircuitShm(String clientName) throws IOException { ShortCircuitShmRequestProto.Builder builder = ShortCircuitShmRequestProto.newBuilder(). setClientName(clientName); if (Trace.isTracing()) { Span s = Trace.currentSpan(); builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder() .setTraceId(s.getTraceId()).setParentId(s.getSpanId())); } ShortCircuitShmRequestProto proto = builder.build(); send(out, Op.REQUEST_SHORT_CIRCUIT_SHM, proto); } @Override public void replaceBlock(final ExtendedBlock blk, final StorageType storageType, final Token<BlockTokenIdentifier> blockToken, final String delHint, final DatanodeInfo source) throws IOException { OpReplaceBlockProto proto = OpReplaceBlockProto.newBuilder() .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken)) .setStorageType(PBHelper.convertStorageType(storageType)) .setDelHint(delHint) .setSource(PBHelper.convertDatanodeInfo(source)) .build(); send(out, Op.REPLACE_BLOCK, proto); } @Override public void copyBlock(final ExtendedBlock blk, final Token<BlockTokenIdentifier> blockToken) throws IOException { OpCopyBlockProto proto = OpCopyBlockProto.newBuilder() .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken)) .build(); send(out, Op.COPY_BLOCK, proto); } @Override public void blockChecksum(final ExtendedBlock blk, final Token<BlockTokenIdentifier> blockToken) throws IOException { OpBlockChecksumProto proto = OpBlockChecksumProto.newBuilder() .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken)) .build(); send(out, Op.BLOCK_CHECKSUM, proto); } }
10,400
38.698473
102
java
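Every request that Sender writes shares the same framing: op() emits the two-byte DATA_TRANSFER_VERSION and the one-byte opcode, and send() then appends the varint-length-delimited protobuf and flushes. The sketch below shows only that fixed prefix with plain java.io streams; the version value 28 and the opcode byte 81 are assumptions for illustration, since the real constants live in DataTransferProtocol and Op, which are not part of this file.

// Sketch of the fixed prefix written by Sender.op() before every request:
// a 2-byte protocol version followed by a 1-byte opcode; send() then appends
// the varint-delimited protobuf message. The version value 28 and the opcode
// byte 81 are assumptions for illustration only.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class DataTransferFrameSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);

    out.writeShort(28);   // assumed DATA_TRANSFER_VERSION
    out.writeByte(81);    // assumed opcode byte (e.g. READ_BLOCK)
    // ... a real Sender would now call proto.writeDelimitedTo(out) ...
    out.flush();

    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println("version=" + in.readShort() + " opcode=" + in.readByte());
  }
}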
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer; import java.io.Closeable; import java.io.DataOutputStream; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.channels.ReadableByteChannel; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.util.DirectBufferPool; import org.apache.hadoop.io.IOUtils; import com.google.common.base.Preconditions; import com.google.common.primitives.Ints; /** * Class to handle reading packets one-at-a-time from the wire. * These packets are used both for reading and writing data to/from * DataNodes. */ @InterfaceAudience.Private public class PacketReceiver implements Closeable { /** * The max size of any single packet. This prevents OOMEs when * invalid data is sent. */ private static final int MAX_PACKET_SIZE = 16 * 1024 * 1024; static final Log LOG = LogFactory.getLog(PacketReceiver.class); private static final DirectBufferPool bufferPool = new DirectBufferPool(); private final boolean useDirectBuffers; /** * The entirety of the most recently read packet. * The first PKT_LENGTHS_LEN bytes of this buffer are the * length prefixes. */ private ByteBuffer curPacketBuf = null; /** * A slice of {@link #curPacketBuf} which contains just the checksums. */ private ByteBuffer curChecksumSlice = null; /** * A slice of {@link #curPacketBuf} which contains just the data. */ private ByteBuffer curDataSlice = null; /** * The packet header of the most recently read packet. */ private PacketHeader curHeader; public PacketReceiver(boolean useDirectBuffers) { this.useDirectBuffers = useDirectBuffers; reallocPacketBuf(PacketHeader.PKT_LENGTHS_LEN); } public PacketHeader getHeader() { return curHeader; } public ByteBuffer getDataSlice() { return curDataSlice; } public ByteBuffer getChecksumSlice() { return curChecksumSlice; } /** * Reads all of the data for the next packet into the appropriate buffers. * * The data slice and checksum slice members will be set to point to the * user data and corresponding checksums. The header will be parsed and * set. */ public void receiveNextPacket(ReadableByteChannel in) throws IOException { doRead(in, null); } /** * @see #receiveNextPacket(ReadableByteChannel) */ public void receiveNextPacket(InputStream in) throws IOException { doRead(null, in); } private void doRead(ReadableByteChannel ch, InputStream in) throws IOException { // Each packet looks like: // PLEN HLEN HEADER CHECKSUMS DATA // 32-bit 16-bit <protobuf> <variable length> // // PLEN: Payload length // = length(PLEN) + length(CHECKSUMS) + length(DATA) // This length includes its own encoded length in // the sum for historical reasons. 
// // HLEN: Header length // = length(HEADER) // // HEADER: the actual packet header fields, encoded in protobuf // CHECKSUMS: the crcs for the data chunk. May be missing if // checksums were not requested // DATA the actual block data Preconditions.checkState(curHeader == null || !curHeader.isLastPacketInBlock()); curPacketBuf.clear(); curPacketBuf.limit(PacketHeader.PKT_LENGTHS_LEN); doReadFully(ch, in, curPacketBuf); curPacketBuf.flip(); int payloadLen = curPacketBuf.getInt(); if (payloadLen < Ints.BYTES) { // The "payload length" includes its own length. Therefore it // should never be less than 4 bytes throw new IOException("Invalid payload length " + payloadLen); } int dataPlusChecksumLen = payloadLen - Ints.BYTES; int headerLen = curPacketBuf.getShort(); if (headerLen < 0) { throw new IOException("Invalid header length " + headerLen); } if (LOG.isTraceEnabled()) { LOG.trace("readNextPacket: dataPlusChecksumLen = " + dataPlusChecksumLen + " headerLen = " + headerLen); } // Sanity check the buffer size so we don't allocate too much memory // and OOME. int totalLen = payloadLen + headerLen; if (totalLen < 0 || totalLen > MAX_PACKET_SIZE) { throw new IOException("Incorrect value for packet payload size: " + payloadLen); } // Make sure we have space for the whole packet, and // read it. reallocPacketBuf(PacketHeader.PKT_LENGTHS_LEN + dataPlusChecksumLen + headerLen); curPacketBuf.clear(); curPacketBuf.position(PacketHeader.PKT_LENGTHS_LEN); curPacketBuf.limit(PacketHeader.PKT_LENGTHS_LEN + dataPlusChecksumLen + headerLen); doReadFully(ch, in, curPacketBuf); curPacketBuf.flip(); curPacketBuf.position(PacketHeader.PKT_LENGTHS_LEN); // Extract the header from the front of the buffer (after the length prefixes) byte[] headerBuf = new byte[headerLen]; curPacketBuf.get(headerBuf); if (curHeader == null) { curHeader = new PacketHeader(); } curHeader.setFieldsFromData(payloadLen, headerBuf); // Compute the sub-slices of the packet int checksumLen = dataPlusChecksumLen - curHeader.getDataLen(); if (checksumLen < 0) { throw new IOException("Invalid packet: data length in packet header " + "exceeds data length received. dataPlusChecksumLen=" + dataPlusChecksumLen + " header: " + curHeader); } reslicePacket(headerLen, checksumLen, curHeader.getDataLen()); } /** * Rewrite the last-read packet on the wire to the given output stream. 
*/ public void mirrorPacketTo(DataOutputStream mirrorOut) throws IOException { Preconditions.checkState(!useDirectBuffers, "Currently only supported for non-direct buffers"); mirrorOut.write(curPacketBuf.array(), curPacketBuf.arrayOffset(), curPacketBuf.remaining()); } private static void doReadFully(ReadableByteChannel ch, InputStream in, ByteBuffer buf) throws IOException { if (ch != null) { readChannelFully(ch, buf); } else { Preconditions.checkState(!buf.isDirect(), "Must not use direct buffers with InputStream API"); IOUtils.readFully(in, buf.array(), buf.arrayOffset() + buf.position(), buf.remaining()); buf.position(buf.position() + buf.remaining()); } } private void reslicePacket( int headerLen, int checksumsLen, int dataLen) { // Packet structure (refer to doRead() for details): // PLEN HLEN HEADER CHECKSUMS DATA // 32-bit 16-bit <protobuf> <variable length> // |--- lenThroughHeader ----| // |----------- lenThroughChecksums ----| // |------------------- lenThroughData ------| int lenThroughHeader = PacketHeader.PKT_LENGTHS_LEN + headerLen; int lenThroughChecksums = lenThroughHeader + checksumsLen; int lenThroughData = lenThroughChecksums + dataLen; assert dataLen >= 0 : "invalid datalen: " + dataLen; assert curPacketBuf.position() == lenThroughHeader; assert curPacketBuf.limit() == lenThroughData : "headerLen= " + headerLen + " clen=" + checksumsLen + " dlen=" + dataLen + " rem=" + curPacketBuf.remaining(); // Slice the checksums. curPacketBuf.position(lenThroughHeader); curPacketBuf.limit(lenThroughChecksums); curChecksumSlice = curPacketBuf.slice(); // Slice the data. curPacketBuf.position(lenThroughChecksums); curPacketBuf.limit(lenThroughData); curDataSlice = curPacketBuf.slice(); // Reset buffer to point to the entirety of the packet (including // length prefixes) curPacketBuf.position(0); curPacketBuf.limit(lenThroughData); } private static void readChannelFully(ReadableByteChannel ch, ByteBuffer buf) throws IOException { while (buf.remaining() > 0) { int n = ch.read(buf); if (n < 0) { throw new IOException("Premature EOF reading from " + ch); } } } private void reallocPacketBuf(int atLeastCapacity) { // Realloc the buffer if this packet is longer than the previous // one. if (curPacketBuf == null || curPacketBuf.capacity() < atLeastCapacity) { ByteBuffer newBuf; if (useDirectBuffers) { newBuf = bufferPool.getBuffer(atLeastCapacity); } else { newBuf = ByteBuffer.allocate(atLeastCapacity); } // If reallocing an existing buffer, copy the old packet length // prefixes over if (curPacketBuf != null) { curPacketBuf.flip(); newBuf.put(curPacketBuf); } returnPacketBufToPool(); curPacketBuf = newBuf; } } private void returnPacketBufToPool() { if (curPacketBuf != null && curPacketBuf.isDirect()) { bufferPool.returnBuffer(curPacketBuf); curPacketBuf = null; } } @Override // Closeable public void close() { returnPacketBufToPool(); } @Override protected void finalize() throws Throwable { try { // just in case it didn't get closed, we // may as well still try to return the buffer returnPacketBufToPool(); } finally { super.finalize(); } } }
10,241
31.932476
84
java
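The comment block in doRead() defines the packet frame as PLEN, HLEN, header, checksums, data, where PLEN counts its own four bytes plus checksums and data but not the header. The standalone sketch below replays that length arithmetic on a hand-built six-byte prefix; every numeric value is invented for illustration.

// Standalone replay of the length arithmetic in PacketReceiver.doRead():
//   PLEN    HLEN      HEADER     CHECKSUMS  DATA
//   32-bit  16-bit    <protobuf> <variable length>
// PLEN counts its own 4 bytes plus checksums and data, but not the header.
// Every numeric value below is invented for illustration.
import java.nio.ByteBuffer;

public class PacketLengthSketch {
  public static void main(String[] args) {
    int dataLen = 512;
    int checksumLen = 8;    // arbitrary
    int headerLen = 25;     // serialized PacketHeaderProto size, arbitrary here

    int plen = 4 + checksumLen + dataLen;

    ByteBuffer prefix = ByteBuffer.allocate(6);
    prefix.putInt(plen);
    prefix.putShort((short) headerLen);
    prefix.flip();

    // What doRead() recovers from the 6-byte length prefix:
    int payloadLen = prefix.getInt();
    int dataPlusChecksumLen = payloadLen - 4;   // strip PLEN's own length
    int protoLen = prefix.getShort();

    System.out.println("dataPlusChecksumLen=" + dataPlusChecksumLen
        + " headerLen=" + protoLen
        + " bytesStillToRead=" + (dataPlusChecksumLen + protoLen));
  }
}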
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer.sasl; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY; import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.InetAddress; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Set; import javax.security.sasl.Sasl; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CipherOption; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.crypto.CryptoInputStream; import org.apache.hadoop.crypto.CryptoOutputStream; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.security.SaslPropertiesResolver; import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Charsets; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; import com.google.common.net.InetAddresses; import com.google.protobuf.ByteString; /** * Utility methods implementing SASL negotiation for DataTransferProtocol. */ @InterfaceAudience.Private public final class DataTransferSaslUtil { private static final Logger LOG = LoggerFactory.getLogger( DataTransferSaslUtil.class); /** * Delimiter for the three-part SASL username string. 
*/ public static final String NAME_DELIMITER = " "; /** * Sent by clients and validated by servers. We use a number that's unlikely * to ever be sent as the value of the DATA_TRANSFER_VERSION. */ public static final int SASL_TRANSFER_MAGIC_NUMBER = 0xDEADBEEF; /** * Checks that SASL negotiation has completed for the given participant, and * the negotiated quality of protection is included in the given SASL * properties and therefore acceptable. * * @param sasl participant to check * @param saslProps properties of SASL negotiation * @throws IOException for any error */ public static void checkSaslComplete(SaslParticipant sasl, Map<String, String> saslProps) throws IOException { if (!sasl.isComplete()) { throw new IOException("Failed to complete SASL handshake"); } Set<String> requestedQop = ImmutableSet.copyOf(Arrays.asList( saslProps.get(Sasl.QOP).split(","))); String negotiatedQop = sasl.getNegotiatedQop(); LOG.debug("Verifying QOP, requested QOP = {}, negotiated QOP = {}", requestedQop, negotiatedQop); if (!requestedQop.contains(negotiatedQop)) { throw new IOException(String.format("SASL handshake completed, but " + "channel does not have acceptable quality of protection, " + "requested = %s, negotiated = %s", requestedQop, negotiatedQop)); } } /** * Check whether requested SASL Qop contains privacy. * * @param saslProps properties of SASL negotiation * @return boolean true if privacy exists */ public static boolean requestedQopContainsPrivacy( Map<String, String> saslProps) { Set<String> requestedQop = ImmutableSet.copyOf(Arrays.asList( saslProps.get(Sasl.QOP).split(","))); return requestedQop.contains("auth-conf"); } /** * Creates SASL properties required for an encrypted SASL negotiation. * * @param encryptionAlgorithm to use for SASL negotation * @return properties of encrypted SASL negotiation */ public static Map<String, String> createSaslPropertiesForEncryption( String encryptionAlgorithm) { Map<String, String> saslProps = Maps.newHashMapWithExpectedSize(3); saslProps.put(Sasl.QOP, QualityOfProtection.PRIVACY.getSaslQop()); saslProps.put(Sasl.SERVER_AUTH, "true"); saslProps.put("com.sun.security.sasl.digest.cipher", encryptionAlgorithm); return saslProps; } /** * For an encrypted SASL negotiation, encodes an encryption key to a SASL * password. * * @param encryptionKey to encode * @return key encoded as SASL password */ public static char[] encryptionKeyToPassword(byte[] encryptionKey) { return new String(Base64.encodeBase64(encryptionKey, false), Charsets.UTF_8) .toCharArray(); } /** * Returns InetAddress from peer. The getRemoteAddressString has the form * [host][/ip-address]:port. The host may be missing. The IP address (and * preceding '/') may be missing. The port preceded by ':' is always present. * * @param peer * @return InetAddress from peer */ public static InetAddress getPeerAddress(Peer peer) { String remoteAddr = peer.getRemoteAddressString().split(":")[0]; int slashIdx = remoteAddr.indexOf('/'); return InetAddresses.forString(slashIdx != -1 ? remoteAddr.substring(slashIdx + 1, remoteAddr.length()) : remoteAddr); } /** * Creates a SaslPropertiesResolver from the given configuration. This method * works by cloning the configuration, translating configuration properties * specific to DataTransferProtocol to what SaslPropertiesResolver expects, * and then delegating to SaslPropertiesResolver for initialization. This * method returns null if SASL protection has not been configured for * DataTransferProtocol. 
* * @param conf configuration to read * @return SaslPropertiesResolver for DataTransferProtocol, or null if not * configured */ public static SaslPropertiesResolver getSaslPropertiesResolver( Configuration conf) { String qops = conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY); if (qops == null || qops.isEmpty()) { LOG.debug("DataTransferProtocol not using SaslPropertiesResolver, no " + "QOP found in configuration for {}", DFS_DATA_TRANSFER_PROTECTION_KEY); return null; } Configuration saslPropsResolverConf = new Configuration(conf); saslPropsResolverConf.set(HADOOP_RPC_PROTECTION, qops); Class<? extends SaslPropertiesResolver> resolverClass = conf.getClass( HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS, SaslPropertiesResolver.class, SaslPropertiesResolver.class); resolverClass = conf.getClass(DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY, resolverClass, SaslPropertiesResolver.class); saslPropsResolverConf.setClass(HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS, resolverClass, SaslPropertiesResolver.class); SaslPropertiesResolver resolver = SaslPropertiesResolver.getInstance( saslPropsResolverConf); LOG.debug("DataTransferProtocol using SaslPropertiesResolver, configured " + "QOP {} = {}, configured class {} = {}", DFS_DATA_TRANSFER_PROTECTION_KEY, qops, DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY, resolverClass); return resolver; } /** * Reads a SASL negotiation message. * * @param in stream to read * @return bytes of SASL negotiation messsage * @throws IOException for any error */ public static byte[] readSaslMessage(InputStream in) throws IOException { DataTransferEncryptorMessageProto proto = DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in)); if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) { throw new InvalidEncryptionKeyException(proto.getMessage()); } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) { throw new IOException(proto.getMessage()); } else { return proto.getPayload().toByteArray(); } } /** * Reads a SASL negotiation message and negotiation cipher options. * * @param in stream to read * @param cipherOptions list to store negotiation cipher options * @return byte[] SASL negotiation message * @throws IOException for any error */ public static byte[] readSaslMessageAndNegotiationCipherOptions( InputStream in, List<CipherOption> cipherOptions) throws IOException { DataTransferEncryptorMessageProto proto = DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in)); if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) { throw new InvalidEncryptionKeyException(proto.getMessage()); } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) { throw new IOException(proto.getMessage()); } else { List<CipherOptionProto> optionProtos = proto.getCipherOptionList(); if (optionProtos != null) { for (CipherOptionProto optionProto : optionProtos) { cipherOptions.add(PBHelper.convert(optionProto)); } } return proto.getPayload().toByteArray(); } } /** * Negotiate a cipher option which server supports. * * @param conf the configuration * @param options the cipher options which client supports * @return CipherOption negotiated cipher option */ public static CipherOption negotiateCipherOption(Configuration conf, List<CipherOption> options) throws IOException { // Negotiate cipher suites if configured. Currently, the only supported // cipher suite is AES/CTR/NoPadding, but the protocol allows multiple // values for future expansion. 
String cipherSuites = conf.get(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY); if (cipherSuites == null || cipherSuites.isEmpty()) { return null; } if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) { throw new IOException(String.format("Invalid cipher suite, %s=%s", DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites)); } if (options != null) { for (CipherOption option : options) { CipherSuite suite = option.getCipherSuite(); if (suite == CipherSuite.AES_CTR_NOPADDING) { int keyLen = conf.getInt( DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY, DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT) / 8; CryptoCodec codec = CryptoCodec.getInstance(conf, suite); byte[] inKey = new byte[keyLen]; byte[] inIv = new byte[suite.getAlgorithmBlockSize()]; byte[] outKey = new byte[keyLen]; byte[] outIv = new byte[suite.getAlgorithmBlockSize()]; codec.generateSecureRandom(inKey); codec.generateSecureRandom(inIv); codec.generateSecureRandom(outKey); codec.generateSecureRandom(outIv); return new CipherOption(suite, inKey, inIv, outKey, outIv); } } } return null; } /** * Send SASL message and negotiated cipher option to client. * * @param out stream to receive message * @param payload to send * @param option negotiated cipher option * @throws IOException for any error */ public static void sendSaslMessageAndNegotiatedCipherOption( OutputStream out, byte[] payload, CipherOption option) throws IOException { DataTransferEncryptorMessageProto.Builder builder = DataTransferEncryptorMessageProto.newBuilder(); builder.setStatus(DataTransferEncryptorStatus.SUCCESS); if (payload != null) { builder.setPayload(ByteString.copyFrom(payload)); } if (option != null) { builder.addCipherOption(PBHelper.convert(option)); } DataTransferEncryptorMessageProto proto = builder.build(); proto.writeDelimitedTo(out); out.flush(); } /** * Create IOStreamPair of {@link org.apache.hadoop.crypto.CryptoInputStream} * and {@link org.apache.hadoop.crypto.CryptoOutputStream} * * @param conf the configuration * @param cipherOption negotiated cipher option * @param out underlying output stream * @param in underlying input stream * @param isServer is server side * @return IOStreamPair the stream pair * @throws IOException for any error */ public static IOStreamPair createStreamPair(Configuration conf, CipherOption cipherOption, OutputStream out, InputStream in, boolean isServer) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Creating IOStreamPair of CryptoInputStream and " + "CryptoOutputStream."); } CryptoCodec codec = CryptoCodec.getInstance(conf, cipherOption.getCipherSuite()); byte[] inKey = cipherOption.getInKey(); byte[] inIv = cipherOption.getInIv(); byte[] outKey = cipherOption.getOutKey(); byte[] outIv = cipherOption.getOutIv(); InputStream cIn = new CryptoInputStream(in, codec, isServer ? inKey : outKey, isServer ? inIv : outIv); OutputStream cOut = new CryptoOutputStream(out, codec, isServer ? outKey : inKey, isServer ? outIv : inIv); return new IOStreamPair(cIn, cOut); } /** * Sends a SASL negotiation message indicating an error. * * @param out stream to receive message * @param message to send * @throws IOException for any error */ public static void sendGenericSaslErrorMessage(OutputStream out, String message) throws IOException { sendSaslMessage(out, DataTransferEncryptorStatus.ERROR, null, message); } /** * Sends a SASL negotiation message. 
* * @param out stream to receive message * @param payload to send * @throws IOException for any error */ public static void sendSaslMessage(OutputStream out, byte[] payload) throws IOException { sendSaslMessage(out, DataTransferEncryptorStatus.SUCCESS, payload, null); } /** * Send a SASL negotiation message and negotiation cipher options to server. * * @param out stream to receive message * @param payload to send * @param options cipher options to negotiate * @throws IOException for any error */ public static void sendSaslMessageAndNegotiationCipherOptions( OutputStream out, byte[] payload, List<CipherOption> options) throws IOException { DataTransferEncryptorMessageProto.Builder builder = DataTransferEncryptorMessageProto.newBuilder(); builder.setStatus(DataTransferEncryptorStatus.SUCCESS); if (payload != null) { builder.setPayload(ByteString.copyFrom(payload)); } if (options != null) { builder.addAllCipherOption(PBHelper.convertCipherOptions(options)); } DataTransferEncryptorMessageProto proto = builder.build(); proto.writeDelimitedTo(out); out.flush(); } /** * Read SASL message and negotiated cipher option from server. * * @param in stream to read * @return SaslResponseWithNegotiatedCipherOption SASL message and * negotiated cipher option * @throws IOException for any error */ public static SaslResponseWithNegotiatedCipherOption readSaslMessageAndNegotiatedCipherOption(InputStream in) throws IOException { DataTransferEncryptorMessageProto proto = DataTransferEncryptorMessageProto.parseFrom(vintPrefixed(in)); if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) { throw new InvalidEncryptionKeyException(proto.getMessage()); } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) { throw new IOException(proto.getMessage()); } else { byte[] response = proto.getPayload().toByteArray(); List<CipherOption> options = PBHelper.convertCipherOptionProtos( proto.getCipherOptionList()); CipherOption option = null; if (options != null && !options.isEmpty()) { option = options.get(0); } return new SaslResponseWithNegotiatedCipherOption(response, option); } } /** * Encrypt the key and iv of the negotiated cipher option. * * @param option negotiated cipher option * @param sasl SASL participant representing server * @return CipherOption negotiated cipher option which contains the * encrypted key and iv * @throws IOException for any error */ public static CipherOption wrap(CipherOption option, SaslParticipant sasl) throws IOException { if (option != null) { byte[] inKey = option.getInKey(); if (inKey != null) { inKey = sasl.wrap(inKey, 0, inKey.length); } byte[] outKey = option.getOutKey(); if (outKey != null) { outKey = sasl.wrap(outKey, 0, outKey.length); } return new CipherOption(option.getCipherSuite(), inKey, option.getInIv(), outKey, option.getOutIv()); } return null; } /** * Decrypt the key and iv of the negotiated cipher option. 
* * @param option negotiated cipher option * @param sasl SASL participant representing client * @return CipherOption negotiated cipher option which contains the * decrypted key and iv * @throws IOException for any error */ public static CipherOption unwrap(CipherOption option, SaslParticipant sasl) throws IOException { if (option != null) { byte[] inKey = option.getInKey(); if (inKey != null) { inKey = sasl.unwrap(inKey, 0, inKey.length); } byte[] outKey = option.getOutKey(); if (outKey != null) { outKey = sasl.unwrap(outKey, 0, outKey.length); } return new CipherOption(option.getCipherSuite(), inKey, option.getInIv(), outKey, option.getOutIv()); } return null; } /** * Sends a SASL negotiation message. * * @param out stream to receive message * @param status negotiation status * @param payload to send * @param message to send * @throws IOException for any error */ public static void sendSaslMessage(OutputStream out, DataTransferEncryptorStatus status, byte[] payload, String message) throws IOException { DataTransferEncryptorMessageProto.Builder builder = DataTransferEncryptorMessageProto.newBuilder(); builder.setStatus(status); if (payload != null) { builder.setPayload(ByteString.copyFrom(payload)); } if (message != null) { builder.setMessage(message); } DataTransferEncryptorMessageProto proto = builder.build(); proto.writeDelimitedTo(out); out.flush(); } /** * There is no reason to instantiate this class. */ private DataTransferSaslUtil() { } }
20,132
37.717308
126
java
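getSaslPropertiesResolver() works by cloning the configuration, copying the DataTransferProtocol-specific QOP and resolver-class settings onto the generic Hadoop SASL keys, and then delegating to SaslPropertiesResolver. A minimal sketch of driving it from client code follows; the literal key "dfs.data.transfer.protection" is assumed to be the value of DFS_DATA_TRANSFER_PROTECTION_KEY, and the printed map is only an example of what a resolver might return.

// Minimal sketch: configure a QOP for DataTransferProtocol and obtain the
// SaslPropertiesResolver that getSaslPropertiesResolver() builds from it.
// The literal key below is assumed to be the value of
// DFS_DATA_TRANSFER_PROTECTION_KEY.
import java.net.InetAddress;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.security.SaslPropertiesResolver;

public class SaslResolverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("dfs.data.transfer.protection", "privacy");  // or "authentication,integrity"

    SaslPropertiesResolver resolver =
        DataTransferSaslUtil.getSaslPropertiesResolver(conf);
    if (resolver == null) {
      System.out.println("no QOP configured; SASL not used for data transfer");
      return;
    }
    Map<String, String> props =
        resolver.getServerProperties(InetAddress.getLoopbackAddress());
    System.out.println(props);  // e.g. {javax.security.sasl.qop=auth-conf, ...}
  }
}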
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer.sasl; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY; import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*; import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.util.List; import java.util.Map; import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.callback.NameCallback; import javax.security.auth.callback.PasswordCallback; import javax.security.auth.callback.UnsupportedCallbackException; import javax.security.sasl.AuthorizeCallback; import javax.security.sasl.RealmCallback; import javax.security.sasl.SaslException; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.crypto.CipherOption; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus; import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.DNConf; import org.apache.hadoop.security.SaslPropertiesResolver; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Charsets; import com.google.common.collect.Lists; /** * Negotiates SASL for DataTransferProtocol on behalf of a server. There are * two possible supported variants of SASL negotiation: either a general-purpose * negotiation supporting any quality of protection, or a specialized * negotiation that enforces privacy as the quality of protection using a * cryptographically strong encryption key. * * This class is used in the DataNode for handling inbound connections. */ @InterfaceAudience.Private public class SaslDataTransferServer { private static final Logger LOG = LoggerFactory.getLogger( SaslDataTransferServer.class); private final BlockPoolTokenSecretManager blockPoolTokenSecretManager; private final DNConf dnConf; /** * Creates a new SaslDataTransferServer. 
* * @param dnConf configuration of DataNode * @param blockPoolTokenSecretManager used for checking block access tokens * and encryption keys */ public SaslDataTransferServer(DNConf dnConf, BlockPoolTokenSecretManager blockPoolTokenSecretManager) { this.blockPoolTokenSecretManager = blockPoolTokenSecretManager; this.dnConf = dnConf; } /** * Receives SASL negotiation from a peer on behalf of a server. * * @param peer connection peer * @param underlyingOut connection output stream * @param underlyingIn connection input stream * @param int xferPort data transfer port of DataNode accepting connection * @param datanodeId ID of DataNode accepting connection * @return new pair of streams, wrapped after SASL negotiation * @throws IOException for any error */ public IOStreamPair receive(Peer peer, OutputStream underlyingOut, InputStream underlyingIn, int xferPort, DatanodeID datanodeId) throws IOException { if (dnConf.getEncryptDataTransfer()) { LOG.debug( "SASL server doing encrypted handshake for peer = {}, datanodeId = {}", peer, datanodeId); return getEncryptedStreams(peer, underlyingOut, underlyingIn); } else if (!UserGroupInformation.isSecurityEnabled()) { LOG.debug( "SASL server skipping handshake in unsecured configuration for " + "peer = {}, datanodeId = {}", peer, datanodeId); return new IOStreamPair(underlyingIn, underlyingOut); } else if (SecurityUtil.isPrivilegedPort(xferPort)) { LOG.debug( "SASL server skipping handshake in secured configuration for " + "peer = {}, datanodeId = {}", peer, datanodeId); return new IOStreamPair(underlyingIn, underlyingOut); } else if (dnConf.getSaslPropsResolver() != null) { LOG.debug( "SASL server doing general handshake for peer = {}, datanodeId = {}", peer, datanodeId); return getSaslStreams(peer, underlyingOut, underlyingIn); } else if (dnConf.getIgnoreSecurePortsForTesting()) { // It's a secured cluster using non-privileged ports, but no SASL. The // only way this can happen is if the DataNode has // ignore.secure.ports.for.testing configured, so this is a rare edge case. LOG.debug( "SASL server skipping handshake in secured configuration with no SASL " + "protection configured for peer = {}, datanodeId = {}", peer, datanodeId); return new IOStreamPair(underlyingIn, underlyingOut); } else { // The error message here intentionally does not mention // ignore.secure.ports.for.testing. That's intended for dev use only. // This code path is not expected to execute ever, because DataNode startup // checks for invalid configuration and aborts. throw new IOException(String.format("Cannot create a secured " + "connection if DataNode listens on unprivileged port (%d) and no " + "protection is defined in configuration property %s.", datanodeId.getXferPort(), DFS_DATA_TRANSFER_PROTECTION_KEY)); } } /** * Receives SASL negotiation for specialized encrypted handshake. 
* * @param peer connection peer * @param underlyingOut connection output stream * @param underlyingIn connection input stream * @return new pair of streams, wrapped after SASL negotiation * @throws IOException for any error */ private IOStreamPair getEncryptedStreams(Peer peer, OutputStream underlyingOut, InputStream underlyingIn) throws IOException { if (peer.hasSecureChannel() || dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer))) { return new IOStreamPair(underlyingIn, underlyingOut); } Map<String, String> saslProps = createSaslPropertiesForEncryption( dnConf.getEncryptionAlgorithm()); if (LOG.isDebugEnabled()) { LOG.debug("Server using encryption algorithm " + dnConf.getEncryptionAlgorithm()); } CallbackHandler callbackHandler = new SaslServerCallbackHandler( new PasswordFunction() { @Override public char[] apply(String userName) throws IOException { return encryptionKeyToPassword(getEncryptionKeyFromUserName(userName)); } }); return doSaslHandshake(underlyingOut, underlyingIn, saslProps, callbackHandler); } /** * The SASL handshake for encrypted vs. general-purpose uses different logic * for determining the password. This interface is used to parameterize that * logic. It's similar to a Guava Function, but we need to let it throw * exceptions. */ private interface PasswordFunction { /** * Returns the SASL password for the given user name. * * @param userName SASL user name * @return SASL password * @throws IOException for any error */ char[] apply(String userName) throws IOException; } /** * Sets user name and password when asked by the server-side SASL object. */ private static final class SaslServerCallbackHandler implements CallbackHandler { private final PasswordFunction passwordFunction; /** * Creates a new SaslServerCallbackHandler. * * @param passwordFunction for determing the user's password */ public SaslServerCallbackHandler(PasswordFunction passwordFunction) { this.passwordFunction = passwordFunction; } @Override public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { NameCallback nc = null; PasswordCallback pc = null; AuthorizeCallback ac = null; for (Callback callback : callbacks) { if (callback instanceof AuthorizeCallback) { ac = (AuthorizeCallback) callback; } else if (callback instanceof PasswordCallback) { pc = (PasswordCallback) callback; } else if (callback instanceof NameCallback) { nc = (NameCallback) callback; } else if (callback instanceof RealmCallback) { continue; // realm is ignored } else { throw new UnsupportedCallbackException(callback, "Unrecognized SASL DIGEST-MD5 Callback: " + callback); } } if (pc != null) { pc.setPassword(passwordFunction.apply(nc.getDefaultName())); } if (ac != null) { ac.setAuthorized(true); ac.setAuthorizedID(ac.getAuthorizationID()); } } } /** * Given a secret manager and a username encoded for the encrypted handshake, * determine the encryption key. * * @param userName containing the keyId, blockPoolId, and nonce. * @return secret encryption key. 
* @throws IOException */ private byte[] getEncryptionKeyFromUserName(String userName) throws IOException { String[] nameComponents = userName.split(NAME_DELIMITER); if (nameComponents.length != 3) { throw new IOException("Provided name '" + userName + "' has " + nameComponents.length + " components instead of the expected 3."); } int keyId = Integer.parseInt(nameComponents[0]); String blockPoolId = nameComponents[1]; byte[] nonce = Base64.decodeBase64(nameComponents[2]); return blockPoolTokenSecretManager.retrieveDataEncryptionKey(keyId, blockPoolId, nonce); } /** * Receives SASL negotiation for general-purpose handshake. * * @param peer connection peer * @param underlyingOut connection output stream * @param underlyingIn connection input stream * @return new pair of streams, wrapped after SASL negotiation * @throws IOException for any error */ private IOStreamPair getSaslStreams(Peer peer, OutputStream underlyingOut, InputStream underlyingIn) throws IOException { if (peer.hasSecureChannel() || dnConf.getTrustedChannelResolver().isTrusted(getPeerAddress(peer))) { return new IOStreamPair(underlyingIn, underlyingOut); } SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver(); Map<String, String> saslProps = saslPropsResolver.getServerProperties( getPeerAddress(peer)); CallbackHandler callbackHandler = new SaslServerCallbackHandler( new PasswordFunction() { @Override public char[] apply(String userName) throws IOException { return buildServerPassword(userName); } }); return doSaslHandshake(underlyingOut, underlyingIn, saslProps, callbackHandler); } /** * Calculates the expected correct password on the server side for the * general-purpose handshake. The password consists of the block access * token's password (known to the DataNode via its secret manager). This * expects that the client has supplied a user name consisting of its * serialized block access token identifier. * * @param userName SASL user name containing serialized block access token * identifier * @return expected correct SASL password * @throws IOException for any error */ private char[] buildServerPassword(String userName) throws IOException { BlockTokenIdentifier identifier = deserializeIdentifier(userName); byte[] tokenPassword = blockPoolTokenSecretManager.retrievePassword( identifier); return (new String(Base64.encodeBase64(tokenPassword, false), Charsets.UTF_8)).toCharArray(); } /** * Deserializes a base64-encoded binary representation of a block access * token. * * @param str String to deserialize * @return BlockTokenIdentifier deserialized from str * @throws IOException if there is any I/O error */ private BlockTokenIdentifier deserializeIdentifier(String str) throws IOException { BlockTokenIdentifier identifier = new BlockTokenIdentifier(); identifier.readFields(new DataInputStream(new ByteArrayInputStream( Base64.decodeBase64(str)))); return identifier; } /** * This method actually executes the server-side SASL handshake. 
* * @param underlyingOut connection output stream * @param underlyingIn connection input stream * @param saslProps properties of SASL negotiation * @param callbackHandler for responding to SASL callbacks * @return new pair of streams, wrapped after SASL negotiation * @throws IOException for any error */ private IOStreamPair doSaslHandshake(OutputStream underlyingOut, InputStream underlyingIn, Map<String, String> saslProps, CallbackHandler callbackHandler) throws IOException { DataInputStream in = new DataInputStream(underlyingIn); DataOutputStream out = new DataOutputStream(underlyingOut); SaslParticipant sasl = SaslParticipant.createServerSaslParticipant(saslProps, callbackHandler); int magicNumber = in.readInt(); if (magicNumber != SASL_TRANSFER_MAGIC_NUMBER) { throw new InvalidMagicNumberException(magicNumber, dnConf.getEncryptDataTransfer()); } try { // step 1 byte[] remoteResponse = readSaslMessage(in); byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse); sendSaslMessage(out, localResponse); // step 2 (server-side only) List<CipherOption> cipherOptions = Lists.newArrayList(); remoteResponse = readSaslMessageAndNegotiationCipherOptions( in, cipherOptions); localResponse = sasl.evaluateChallengeOrResponse(remoteResponse); // SASL handshake is complete checkSaslComplete(sasl, saslProps); CipherOption cipherOption = null; if (sasl.isNegotiatedQopPrivacy()) { // Negotiate a cipher option cipherOption = negotiateCipherOption(dnConf.getConf(), cipherOptions); if (cipherOption != null) { if (LOG.isDebugEnabled()) { LOG.debug("Server using cipher suite " + cipherOption.getCipherSuite().getName()); } } } // If negotiated cipher option is not null, wrap it before sending. sendSaslMessageAndNegotiatedCipherOption(out, localResponse, wrap(cipherOption, sasl)); // If negotiated cipher option is not null, we will use it to create // stream pair. return cipherOption != null ? createStreamPair( dnConf.getConf(), cipherOption, underlyingOut, underlyingIn, true) : sasl.createStreamPair(out, in); } catch (IOException ioe) { if (ioe instanceof SaslException && ioe.getCause() != null && ioe.getCause() instanceof InvalidEncryptionKeyException) { // This could just be because the client is long-lived and hasn't gotten // a new encryption key from the NN in a while. Upon receiving this // error, the client will get a new encryption key from the NN and retry // connecting to this DN. sendInvalidKeySaslErrorMessage(out, ioe.getCause().getMessage()); } else { sendGenericSaslErrorMessage(out, ioe.getMessage()); } throw ioe; } } /** * Sends a SASL negotiation message indicating an invalid key error. * * @param out stream to receive message * @param message to send * @throws IOException for any error */ private static void sendInvalidKeySaslErrorMessage(DataOutputStream out, String message) throws IOException { sendSaslMessage(out, DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY, null, message); } }
16,872
38.422897
126
java
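For the encrypted handshake, getEncryptionKeyFromUserName() expects the SASL user name to carry three space-separated components: the key id, the block pool id, and a base64-encoded nonce. The sketch below builds such a name and splits it apart the same way the server does; all values are invented, and a real client derives them from its DataEncryptionKey (that side lives in SaslDataTransferClient, not shown here in full).

// Sketch of the three-part SASL user name consumed by
// getEncryptionKeyFromUserName(): "<keyId> <blockPoolId> <base64(nonce)>",
// joined by NAME_DELIMITER (a single space). All values are invented.
import java.nio.charset.StandardCharsets;

import org.apache.commons.codec.binary.Base64;

public class EncryptedHandshakeNameSketch {
  private static final String NAME_DELIMITER = " ";

  public static void main(String[] args) {
    int keyId = 42;
    String blockPoolId = "BP-1234567-10.0.0.1-1400000000000";
    byte[] nonce = new byte[] {1, 2, 3, 4, 5, 6, 7, 8};

    // What the client would present as its SASL user name.
    String userName = keyId + NAME_DELIMITER + blockPoolId + NAME_DELIMITER
        + new String(Base64.encodeBase64(nonce, false), StandardCharsets.UTF_8);

    // What the server does with it before looking up the encryption key.
    String[] parts = userName.split(NAME_DELIMITER);
    int parsedKeyId = Integer.parseInt(parts[0]);
    String parsedPool = parts[1];
    byte[] parsedNonce = Base64.decodeBase64(parts[2]);

    System.out.println(parsedKeyId + " / " + parsedPool + " / "
        + parsedNonce.length + "-byte nonce");
  }
}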
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer.sasl; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY; import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.InetAddress; import java.net.Socket; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.callback.NameCallback; import javax.security.auth.callback.PasswordCallback; import javax.security.auth.callback.UnsupportedCallbackException; import javax.security.sasl.RealmCallback; import javax.security.sasl.RealmChoiceCallback; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CipherOption; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.hdfs.net.EncryptedPeer; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.security.SaslPropertiesResolver; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Charsets; import com.google.common.collect.Lists; /** * Negotiates SASL for DataTransferProtocol on behalf of a client. There are * two possible supported variants of SASL negotiation: either a general-purpose * negotiation supporting any quality of protection, or a specialized * negotiation that enforces privacy as the quality of protection using a * cryptographically strong encryption key. * * This class is used in both the HDFS client and the DataNode. The DataNode * needs it, because it acts as a client to other DataNodes during write * pipelines and block transfers. 
*/ @InterfaceAudience.Private public class SaslDataTransferClient { private static final Logger LOG = LoggerFactory.getLogger( SaslDataTransferClient.class); private final Configuration conf; private final AtomicBoolean fallbackToSimpleAuth; private final SaslPropertiesResolver saslPropsResolver; private final TrustedChannelResolver trustedChannelResolver; /** * Creates a new SaslDataTransferClient. This constructor is used in cases * where it is not relevant to track if a secure client did a fallback to * simple auth. For intra-cluster connections between data nodes in the same * cluster, we can assume that all run under the same security configuration. * * @param conf the configuration * @param saslPropsResolver for determining properties of SASL negotiation * @param trustedChannelResolver for identifying trusted connections that do * not require SASL negotiation */ public SaslDataTransferClient(Configuration conf, SaslPropertiesResolver saslPropsResolver, TrustedChannelResolver trustedChannelResolver) { this(conf, saslPropsResolver, trustedChannelResolver, null); } /** * Creates a new SaslDataTransferClient. * * @param conf the configuration * @param saslPropsResolver for determining properties of SASL negotiation * @param trustedChannelResolver for identifying trusted connections that do * not require SASL negotiation * @param fallbackToSimpleAuth checked on each attempt at general SASL * handshake, if true forces use of simple auth */ public SaslDataTransferClient(Configuration conf, SaslPropertiesResolver saslPropsResolver, TrustedChannelResolver trustedChannelResolver, AtomicBoolean fallbackToSimpleAuth) { this.conf = conf; this.fallbackToSimpleAuth = fallbackToSimpleAuth; this.saslPropsResolver = saslPropsResolver; this.trustedChannelResolver = trustedChannelResolver; } /** * Sends client SASL negotiation for a newly allocated socket if required. * * @param socket connection socket * @param underlyingOut connection output stream * @param underlyingIn connection input stream * @param encryptionKeyFactory for creation of an encryption key * @param accessToken connection block access token * @param datanodeId ID of destination DataNode * @return new pair of streams, wrapped after SASL negotiation * @throws IOException for any error */ public IOStreamPair newSocketSend(Socket socket, OutputStream underlyingOut, InputStream underlyingIn, DataEncryptionKeyFactory encryptionKeyFactory, Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId) throws IOException { // The encryption key factory only returns a key if encryption is enabled. DataEncryptionKey encryptionKey = !trustedChannelResolver.isTrusted() ? encryptionKeyFactory.newDataEncryptionKey() : null; IOStreamPair ios = send(socket.getInetAddress(), underlyingOut, underlyingIn, encryptionKey, accessToken, datanodeId); return ios != null ? ios : new IOStreamPair(underlyingIn, underlyingOut); } /** * Sends client SASL negotiation for a peer if required. 
* * @param peer connection peer * @param encryptionKeyFactory for creation of an encryption key * @param accessToken connection block access token * @param datanodeId ID of destination DataNode * @return new pair of streams, wrapped after SASL negotiation * @throws IOException for any error */ public Peer peerSend(Peer peer, DataEncryptionKeyFactory encryptionKeyFactory, Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId) throws IOException { IOStreamPair ios = checkTrustAndSend(getPeerAddress(peer), peer.getOutputStream(), peer.getInputStream(), encryptionKeyFactory, accessToken, datanodeId); // TODO: Consider renaming EncryptedPeer to SaslPeer. return ios != null ? new EncryptedPeer(peer, ios) : peer; } /** * Sends client SASL negotiation for a socket if required. * * @param socket connection socket * @param underlyingOut connection output stream * @param underlyingIn connection input stream * @param encryptionKeyFactory for creation of an encryption key * @param accessToken connection block access token * @param datanodeId ID of destination DataNode * @return new pair of streams, wrapped after SASL negotiation * @throws IOException for any error */ public IOStreamPair socketSend(Socket socket, OutputStream underlyingOut, InputStream underlyingIn, DataEncryptionKeyFactory encryptionKeyFactory, Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId) throws IOException { IOStreamPair ios = checkTrustAndSend(socket.getInetAddress(), underlyingOut, underlyingIn, encryptionKeyFactory, accessToken, datanodeId); return ios != null ? ios : new IOStreamPair(underlyingIn, underlyingOut); } /** * Checks if an address is already trusted and then sends client SASL * negotiation if required. * * @param addr connection address * @param underlyingOut connection output stream * @param underlyingIn connection input stream * @param encryptionKeyFactory for creation of an encryption key * @param accessToken connection block access token * @param datanodeId ID of destination DataNode * @return new pair of streams, wrapped after SASL negotiation * @throws IOException for any error */ private IOStreamPair checkTrustAndSend(InetAddress addr, OutputStream underlyingOut, InputStream underlyingIn, DataEncryptionKeyFactory encryptionKeyFactory, Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId) throws IOException { if (!trustedChannelResolver.isTrusted() && !trustedChannelResolver.isTrusted(addr)) { // The encryption key factory only returns a key if encryption is enabled. DataEncryptionKey encryptionKey = encryptionKeyFactory.newDataEncryptionKey(); return send(addr, underlyingOut, underlyingIn, encryptionKey, accessToken, datanodeId); } else { LOG.debug( "SASL client skipping handshake on trusted connection for addr = {}, " + "datanodeId = {}", addr, datanodeId); return null; } } /** * Sends client SASL negotiation if required. Determines the correct type of * SASL handshake based on configuration. 
* * @param addr connection address * @param underlyingOut connection output stream * @param underlyingIn connection input stream * @param encryptionKey for an encrypted SASL handshake * @param accessToken connection block access token * @param datanodeId ID of destination DataNode * @return new pair of streams, wrapped after SASL negotiation * @throws IOException for any error */ private IOStreamPair send(InetAddress addr, OutputStream underlyingOut, InputStream underlyingIn, DataEncryptionKey encryptionKey, Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId) throws IOException { if (encryptionKey != null) { LOG.debug( "SASL client doing encrypted handshake for addr = {}, datanodeId = {}", addr, datanodeId); return getEncryptedStreams(underlyingOut, underlyingIn, encryptionKey); } else if (!UserGroupInformation.isSecurityEnabled()) { LOG.debug( "SASL client skipping handshake in unsecured configuration for " + "addr = {}, datanodeId = {}", addr, datanodeId); return null; } else if (SecurityUtil.isPrivilegedPort(datanodeId.getXferPort())) { LOG.debug( "SASL client skipping handshake in secured configuration with " + "privileged port for addr = {}, datanodeId = {}", addr, datanodeId); return null; } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) { LOG.debug( "SASL client skipping handshake in secured configuration with " + "unsecured cluster for addr = {}, datanodeId = {}", addr, datanodeId); return null; } else if (saslPropsResolver != null) { LOG.debug( "SASL client doing general handshake for addr = {}, datanodeId = {}", addr, datanodeId); return getSaslStreams(addr, underlyingOut, underlyingIn, accessToken, datanodeId); } else { // It's a secured cluster using non-privileged ports, but no SASL. The // only way this can happen is if the DataNode has // ignore.secure.ports.for.testing configured, so this is a rare edge case. LOG.debug( "SASL client skipping handshake in secured configuration with no SASL " + "protection configured for addr = {}, datanodeId = {}", addr, datanodeId); return null; } } /** * Sends client SASL negotiation for specialized encrypted handshake. * * @param underlyingOut connection output stream * @param underlyingIn connection input stream * @param encryptionKey for an encrypted SASL handshake * @return new pair of streams, wrapped after SASL negotiation * @throws IOException for any error */ private IOStreamPair getEncryptedStreams(OutputStream underlyingOut, InputStream underlyingIn, DataEncryptionKey encryptionKey) throws IOException { Map<String, String> saslProps = createSaslPropertiesForEncryption( encryptionKey.encryptionAlgorithm); LOG.debug("Client using encryption algorithm {}", encryptionKey.encryptionAlgorithm); String userName = getUserNameFromEncryptionKey(encryptionKey); char[] password = encryptionKeyToPassword(encryptionKey.encryptionKey); CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName, password); return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps, callbackHandler); } /** * The SASL username for an encrypted handshake consists of the keyId, * blockPoolId, and nonce with the first two encoded as Strings, and the third * encoded using Base64. The fields are each separated by a single space. * * @param encryptionKey the encryption key to encode as a SASL username. 
* @return encoded username containing keyId, blockPoolId, and nonce */ private static String getUserNameFromEncryptionKey( DataEncryptionKey encryptionKey) { return encryptionKey.keyId + NAME_DELIMITER + encryptionKey.blockPoolId + NAME_DELIMITER + new String(Base64.encodeBase64(encryptionKey.nonce, false), Charsets.UTF_8); } /** * Sets user name and password when asked by the client-side SASL object. */ private static final class SaslClientCallbackHandler implements CallbackHandler { private final char[] password; private final String userName; /** * Creates a new SaslClientCallbackHandler. * * @param userName SASL user name * @Param password SASL password */ public SaslClientCallbackHandler(String userName, char[] password) { this.password = password; this.userName = userName; } @Override public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { NameCallback nc = null; PasswordCallback pc = null; RealmCallback rc = null; for (Callback callback : callbacks) { if (callback instanceof RealmChoiceCallback) { continue; } else if (callback instanceof NameCallback) { nc = (NameCallback) callback; } else if (callback instanceof PasswordCallback) { pc = (PasswordCallback) callback; } else if (callback instanceof RealmCallback) { rc = (RealmCallback) callback; } else { throw new UnsupportedCallbackException(callback, "Unrecognized SASL client callback"); } } if (nc != null) { nc.setName(userName); } if (pc != null) { pc.setPassword(password); } if (rc != null) { rc.setText(rc.getDefaultText()); } } } /** * Sends client SASL negotiation for general-purpose handshake. * * @param addr connection address * @param underlyingOut connection output stream * @param underlyingIn connection input stream * @param accessToken connection block access token * @param datanodeId ID of destination DataNode * @return new pair of streams, wrapped after SASL negotiation * @throws IOException for any error */ private IOStreamPair getSaslStreams(InetAddress addr, OutputStream underlyingOut, InputStream underlyingIn, Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId) throws IOException { Map<String, String> saslProps = saslPropsResolver.getClientProperties(addr); String userName = buildUserName(accessToken); char[] password = buildClientPassword(accessToken); CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName, password); return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps, callbackHandler); } /** * Builds the client's user name for the general-purpose handshake, consisting * of the base64-encoded serialized block access token identifier. Note that * this includes only the token identifier, not the token itself, which would * include the password. The password is a shared secret, and we must not * write it on the network during the SASL authentication exchange. * * @param blockToken for block access * @return SASL user name */ private static String buildUserName(Token<BlockTokenIdentifier> blockToken) { return new String(Base64.encodeBase64(blockToken.getIdentifier(), false), Charsets.UTF_8); } /** * Calculates the password on the client side for the general-purpose * handshake. The password consists of the block access token's password. 
* * @param blockToken for block access * @return SASL password */ private char[] buildClientPassword(Token<BlockTokenIdentifier> blockToken) { return new String(Base64.encodeBase64(blockToken.getPassword(), false), Charsets.UTF_8).toCharArray(); } /** * This method actually executes the client-side SASL handshake. * * @param underlyingOut connection output stream * @param underlyingIn connection input stream * @param userName SASL user name * @param saslProps properties of SASL negotiation * @param callbackHandler for responding to SASL callbacks * @return new pair of streams, wrapped after SASL negotiation * @throws IOException for any error */ private IOStreamPair doSaslHandshake(OutputStream underlyingOut, InputStream underlyingIn, String userName, Map<String, String> saslProps, CallbackHandler callbackHandler) throws IOException { DataOutputStream out = new DataOutputStream(underlyingOut); DataInputStream in = new DataInputStream(underlyingIn); SaslParticipant sasl= SaslParticipant.createClientSaslParticipant(userName, saslProps, callbackHandler); out.writeInt(SASL_TRANSFER_MAGIC_NUMBER); out.flush(); try { // Start of handshake - "initial response" in SASL terminology. sendSaslMessage(out, new byte[0]); // step 1 byte[] remoteResponse = readSaslMessage(in); byte[] localResponse = sasl.evaluateChallengeOrResponse(remoteResponse); List<CipherOption> cipherOptions = null; if (requestedQopContainsPrivacy(saslProps)) { // Negotiate cipher suites if configured. Currently, the only supported // cipher suite is AES/CTR/NoPadding, but the protocol allows multiple // values for future expansion. String cipherSuites = conf.get( DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY); if (cipherSuites != null && !cipherSuites.isEmpty()) { if (!cipherSuites.equals(CipherSuite.AES_CTR_NOPADDING.getName())) { throw new IOException(String.format("Invalid cipher suite, %s=%s", DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuites)); } CipherOption option = new CipherOption(CipherSuite.AES_CTR_NOPADDING); cipherOptions = Lists.newArrayListWithCapacity(1); cipherOptions.add(option); } } sendSaslMessageAndNegotiationCipherOptions(out, localResponse, cipherOptions); // step 2 (client-side only) SaslResponseWithNegotiatedCipherOption response = readSaslMessageAndNegotiatedCipherOption(in); localResponse = sasl.evaluateChallengeOrResponse(response.payload); assert localResponse == null; // SASL handshake is complete checkSaslComplete(sasl, saslProps); CipherOption cipherOption = null; if (sasl.isNegotiatedQopPrivacy()) { // Unwrap the negotiated cipher option cipherOption = unwrap(response.cipherOption, sasl); } // If negotiated cipher option is not null, we will use it to create // stream pair. return cipherOption != null ? createStreamPair( conf, cipherOption, underlyingOut, underlyingIn, false) : sasl.createStreamPair(out, in); } catch (IOException ioe) { sendGenericSaslErrorMessage(out, ioe.getMessage()); throw ioe; } } }
20,629
40.342685
95
java
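The client above builds its SASL identity in two ways: for the encrypted handshake the user name is keyId, blockPoolId and a Base64-encoded nonce separated by single spaces, while for the general handshake the user name is the Base64-encoded token identifier and the SASL password is the Base64-encoded token password. The sketch below reproduces that string building with the JDK's java.util.Base64 instead of the commons-codec Base64 used by the real code (both produce the same unchunked encoding); all input values and the class name are made up for illustration.

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class SaslIdentitySketch {

  // Mirrors getUserNameFromEncryptionKey: keyId, blockPoolId and base64(nonce),
  // separated by single spaces.
  static String encryptedHandshakeUserName(int keyId, String blockPoolId, byte[] nonce) {
    return keyId + " " + blockPoolId + " "
        + Base64.getEncoder().encodeToString(nonce);
  }

  // Mirrors buildUserName for the general handshake: only the serialized token
  // *identifier* goes into the user name, never the token password.
  static String generalHandshakeUserName(byte[] tokenIdentifier) {
    return Base64.getEncoder().encodeToString(tokenIdentifier);
  }

  // Mirrors buildClientPassword: the block token's password, Base64-encoded and
  // handed to SASL as a char array.
  static char[] generalHandshakePassword(byte[] tokenPassword) {
    return new String(Base64.getEncoder().encode(tokenPassword),
        StandardCharsets.UTF_8).toCharArray();
  }

  public static void main(String[] args) {
    byte[] nonce = {0x01, 0x02, 0x03};            // made-up values for illustration only
    byte[] identifier = "demo-identifier".getBytes(StandardCharsets.UTF_8);
    byte[] secret = "demo-password".getBytes(StandardCharsets.UTF_8);

    System.out.println(encryptedHandshakeUserName(42, "BP-demo-pool", nonce));
    System.out.println(generalHandshakeUserName(identifier));
    System.out.println(new String(generalHandshakePassword(secret)));
  }
}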
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslResponseWithNegotiatedCipherOption.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer.sasl; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.crypto.CipherOption; @InterfaceAudience.Private public class SaslResponseWithNegotiatedCipherOption { final byte[] payload; final CipherOption cipherOption; public SaslResponseWithNegotiatedCipherOption(byte[] payload, CipherOption cipherOption) { this.payload = payload; this.cipherOption = cipherOption; } }
1,287
38.030303
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer.sasl; import java.io.DataInputStream; import java.io.DataOutputStream; import java.util.Map; import javax.security.auth.callback.CallbackHandler; import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; import javax.security.sasl.SaslException; import javax.security.sasl.SaslServer; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.security.SaslInputStream; import org.apache.hadoop.security.SaslOutputStream; /** * Strongly inspired by Thrift's TSaslTransport class. * * Used to abstract over the <code>SaslServer</code> and * <code>SaslClient</code> classes, which share a lot of their interface, but * unfortunately don't share a common superclass. */ @InterfaceAudience.Private class SaslParticipant { // This has to be set as part of the SASL spec, but it don't matter for // our purposes, but may not be empty. It's sent over the wire, so use // a short string. private static final String SERVER_NAME = "0"; private static final String PROTOCOL = "hdfs"; private static final String MECHANISM = "DIGEST-MD5"; // One of these will always be null. private final SaslServer saslServer; private final SaslClient saslClient; /** * Creates a SaslParticipant wrapping a SaslServer. * * @param saslProps properties of SASL negotiation * @param callbackHandler for handling all SASL callbacks * @return SaslParticipant wrapping SaslServer * @throws SaslException for any error */ public static SaslParticipant createServerSaslParticipant( Map<String, String> saslProps, CallbackHandler callbackHandler) throws SaslException { return new SaslParticipant(Sasl.createSaslServer(MECHANISM, PROTOCOL, SERVER_NAME, saslProps, callbackHandler)); } /** * Creates a SaslParticipant wrapping a SaslClient. * * @param userName SASL user name * @param saslProps properties of SASL negotiation * @param callbackHandler for handling all SASL callbacks * @return SaslParticipant wrapping SaslClient * @throws SaslException for any error */ public static SaslParticipant createClientSaslParticipant(String userName, Map<String, String> saslProps, CallbackHandler callbackHandler) throws SaslException { return new SaslParticipant(Sasl.createSaslClient(new String[] { MECHANISM }, userName, PROTOCOL, SERVER_NAME, saslProps, callbackHandler)); } /** * Private constructor wrapping a SaslServer. * * @param saslServer to wrap */ private SaslParticipant(SaslServer saslServer) { this.saslServer = saslServer; this.saslClient = null; } /** * Private constructor wrapping a SaslClient. 
* * @param saslClient to wrap */ private SaslParticipant(SaslClient saslClient) { this.saslServer = null; this.saslClient = saslClient; } /** * @see {@link SaslServer#evaluateResponse} * @see {@link SaslClient#evaluateChallenge} */ public byte[] evaluateChallengeOrResponse(byte[] challengeOrResponse) throws SaslException { if (saslClient != null) { return saslClient.evaluateChallenge(challengeOrResponse); } else { return saslServer.evaluateResponse(challengeOrResponse); } } /** * After successful SASL negotation, returns the negotiated quality of * protection. * * @return negotiated quality of protection */ public String getNegotiatedQop() { if (saslClient != null) { return (String) saslClient.getNegotiatedProperty(Sasl.QOP); } else { return (String) saslServer.getNegotiatedProperty(Sasl.QOP); } } /** * After successful SASL negotiation, returns whether it's QOP privacy * * @return boolean whether it's QOP privacy */ public boolean isNegotiatedQopPrivacy() { String qop = getNegotiatedQop(); return qop != null && "auth-conf".equalsIgnoreCase(qop); } /** * Wraps a byte array. * * @param bytes The array containing the bytes to wrap. * @param off The starting position at the array * @param len The number of bytes to wrap * @return byte[] wrapped bytes * @throws SaslException if the bytes cannot be successfully wrapped */ public byte[] wrap(byte[] bytes, int off, int len) throws SaslException { if (saslClient != null) { return saslClient.wrap(bytes, off, len); } else { return saslServer.wrap(bytes, off, len); } } /** * Unwraps a byte array. * * @param bytes The array containing the bytes to unwrap. * @param off The starting position at the array * @param len The number of bytes to unwrap * @return byte[] unwrapped bytes * @throws SaslException if the bytes cannot be successfully unwrapped */ public byte[] unwrap(byte[] bytes, int off, int len) throws SaslException { if (saslClient != null) { return saslClient.unwrap(bytes, off, len); } else { return saslServer.unwrap(bytes, off, len); } } /** * Returns true if SASL negotiation is complete. * * @return true if SASL negotiation is complete */ public boolean isComplete() { if (saslClient != null) { return saslClient.isComplete(); } else { return saslServer.isComplete(); } } /** * Return some input/output streams that may henceforth have their * communication encrypted, depending on the negotiated quality of protection. * * @param out output stream to wrap * @param in input stream to wrap * @return IOStreamPair wrapping the streams */ public IOStreamPair createStreamPair(DataOutputStream out, DataInputStream in) { if (saslClient != null) { return new IOStreamPair( new SaslInputStream(in, saslClient), new SaslOutputStream(out, saslClient)); } else { return new IOStreamPair( new SaslInputStream(in, saslServer), new SaslOutputStream(out, saslServer)); } } }
6,852
31.478673
80
java
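SaslParticipant requests and checks the quality of protection through the standard javax.security.sasl property keys. The sketch below shows the kind of property map that is handed to createSaslServer/createSaslClient and mirrors the isNegotiatedQopPrivacy check; the concrete property values and the class name are illustrative, not the output of any particular Hadoop SaslPropertiesResolver.

import java.util.HashMap;
import java.util.Map;
import javax.security.sasl.Sasl;

public class QopSketch {

  // Builds an illustrative SASL property map.
  static Map<String, String> saslProps() {
    Map<String, String> props = new HashMap<>();
    // Preference order: privacy (encryption), then integrity, then auth only.
    props.put(Sasl.QOP, "auth-conf,auth-int,auth");
    props.put(Sasl.SERVER_AUTH, "true");
    return props;
  }

  // Mirrors SaslParticipant.isNegotiatedQopPrivacy: only "auth-conf" means the
  // wrapped streams actually encrypt the payload.
  static boolean isPrivacy(String negotiatedQop) {
    return negotiatedQop != null && "auth-conf".equalsIgnoreCase(negotiatedQop);
  }

  public static void main(String[] args) {
    System.out.println(saslProps());
    System.out.println(isPrivacy("auth-conf")); // true: privacy, payload is encrypted
    System.out.println(isPrivacy("auth-int"));  // false: integrity only
    System.out.println(isPrivacy("auth"));      // false: authentication only
  }
}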
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.protocol.datatransfer.sasl; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; /** * Creates a new {@link DataEncryptionKey} on demand. */ @InterfaceAudience.Private public interface DataEncryptionKeyFactory { /** * Creates a new DataEncryptionKey. * * @return DataEncryptionKey newly created * @throws IOException for any error */ DataEncryptionKey newDataEncryptionKey() throws IOException; }
1,364
34
75
java
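Because DataEncryptionKeyFactory declares a single method, an implementation can be supplied as a lambda. The sketch below simply returns null, which matches the comment in SaslDataTransferClient that the factory only returns a key if encryption is enabled; a real factory, such as the one used by the HDFS client, would fetch a fresh key from the NameNode instead. The class name is hypothetical.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;

public class KeyFactorySketch {
  public static void main(String[] args) throws IOException {
    // Single-method interface, so a lambda works. Returning null mimics a
    // cluster where data transfer encryption is disabled; callers then skip
    // the encrypted handshake entirely.
    DataEncryptionKeyFactory noEncryption = () -> null;

    DataEncryptionKey key = noEncryption.newDataEncryptionKey();
    System.out.println("encryption key: " + key); // null => no encrypted handshake
  }
}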
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/InvalidMagicNumberException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;

import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.SASL_TRANSFER_MAGIC_NUMBER;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;

/**
 * Indicates that SASL protocol negotiation expected to read a pre-defined magic
 * number, but the expected value was not seen.
 */
@InterfaceAudience.Private
public class InvalidMagicNumberException extends IOException {

  private static final long serialVersionUID = 1L;
  private final boolean handshake4Encryption;

  /**
   * Creates a new InvalidMagicNumberException.
   *
   * @param magicNumber the magic number actually received from the client
   * @param handshake4Encryption true if the handshake was for data transfer
   *          encryption
   */
  public InvalidMagicNumberException(final int magicNumber,
      final boolean handshake4Encryption) {
    super(String.format("Received %x instead of %x from client.",
        magicNumber, SASL_TRANSFER_MAGIC_NUMBER));
    this.handshake4Encryption = handshake4Encryption;
  }

  /**
   * Returns true if this handshake was for data transfer encryption.
   *
   * @return true if this handshake was for encryption
   */
  public boolean isHandshake4Encryption() {
    return handshake4Encryption;
  }
}
1,993
33.982456
112
java
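InvalidMagicNumberException is thrown when the first four bytes of the connection are not the agreed-upon magic number. The sketch below shows that guard with plain DataInputStream/DataOutputStream; the MAGIC constant and class name are placeholders, since the real SASL_TRANSFER_MAGIC_NUMBER is defined in DataTransferSaslUtil and is not shown here, and a plain IOException stands in for the real exception class.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class MagicNumberGuardSketch {

  // Placeholder value; the real constant is shared by client and server.
  static final int MAGIC = 0x1CEB00DA;

  static void checkMagicNumber(DataInputStream in) throws IOException {
    int received = in.readInt();
    if (received != MAGIC) {
      // The real code throws InvalidMagicNumberException, carrying the received
      // value and whether the handshake was for encryption.
      throw new IOException(String.format(
          "Received %x instead of %x from client.", received, MAGIC));
    }
  }

  public static void main(String[] args) throws IOException {
    // Well-behaved client: writes the magic number before any SASL message.
    ByteArrayOutputStream good = new ByteArrayOutputStream();
    new DataOutputStream(good).writeInt(MAGIC);
    checkMagicNumber(new DataInputStream(new ByteArrayInputStream(good.toByteArray())));
    System.out.println("magic number accepted");

    // Misbehaving client: the first four bytes are something else entirely.
    ByteArrayOutputStream bad = new ByteArrayOutputStream();
    new DataOutputStream(bad).writeInt(42);
    try {
      checkMagicNumber(new DataInputStream(new ByteArrayInputStream(bad.toByteArray())));
    } catch (IOException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}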
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.protocolPB; import java.io.IOException; import java.net.URL; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto; 
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; import org.apache.hadoop.hdfs.server.protocol.JournalProtocol; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; /** * Implementation for protobuf service that forwards requests * received on {@link JournalProtocolPB} to the * {@link JournalProtocol} server implementation. 
*/ @InterfaceAudience.Private public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolPB { /** Server side implementation to delegate the requests to */ private final QJournalProtocol impl; private final static JournalResponseProto VOID_JOURNAL_RESPONSE = JournalResponseProto.newBuilder().build(); private final static StartLogSegmentResponseProto VOID_START_LOG_SEGMENT_RESPONSE = StartLogSegmentResponseProto.newBuilder().build(); public QJournalProtocolServerSideTranslatorPB(QJournalProtocol impl) { this.impl = impl; } @Override public IsFormattedResponseProto isFormatted(RpcController controller, IsFormattedRequestProto request) throws ServiceException { try { boolean ret = impl.isFormatted( convert(request.getJid())); return IsFormattedResponseProto.newBuilder() .setIsFormatted(ret) .build(); } catch (IOException ioe) { throw new ServiceException(ioe); } } @Override public GetJournalStateResponseProto getJournalState(RpcController controller, GetJournalStateRequestProto request) throws ServiceException { try { return impl.getJournalState( convert(request.getJid())); } catch (IOException ioe) { throw new ServiceException(ioe); } } private String convert(JournalIdProto jid) { return jid.getIdentifier(); } @Override public NewEpochResponseProto newEpoch(RpcController controller, NewEpochRequestProto request) throws ServiceException { try { return impl.newEpoch( request.getJid().getIdentifier(), PBHelper.convert(request.getNsInfo()), request.getEpoch()); } catch (IOException ioe) { throw new ServiceException(ioe); } } public FormatResponseProto format(RpcController controller, FormatRequestProto request) throws ServiceException { try { impl.format(request.getJid().getIdentifier(), PBHelper.convert(request.getNsInfo())); return FormatResponseProto.getDefaultInstance(); } catch (IOException ioe) { throw new ServiceException(ioe); } } /** @see JournalProtocol#journal */ @Override public JournalResponseProto journal(RpcController unused, JournalRequestProto req) throws ServiceException { try { impl.journal(convert(req.getReqInfo()), req.getSegmentTxnId(), req.getFirstTxnId(), req.getNumTxns(), req.getRecords().toByteArray()); } catch (IOException e) { throw new ServiceException(e); } return VOID_JOURNAL_RESPONSE; } /** @see JournalProtocol#heartbeat */ @Override public HeartbeatResponseProto heartbeat(RpcController controller, HeartbeatRequestProto req) throws ServiceException { try { impl.heartbeat(convert(req.getReqInfo())); } catch (IOException e) { throw new ServiceException(e); } return HeartbeatResponseProto.getDefaultInstance(); } /** @see JournalProtocol#startLogSegment */ @Override public StartLogSegmentResponseProto startLogSegment(RpcController controller, StartLogSegmentRequestProto req) throws ServiceException { try { int layoutVersion = req.hasLayoutVersion() ? 
req.getLayoutVersion() : NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION; impl.startLogSegment(convert(req.getReqInfo()), req.getTxid(), layoutVersion); } catch (IOException e) { throw new ServiceException(e); } return VOID_START_LOG_SEGMENT_RESPONSE; } @Override public FinalizeLogSegmentResponseProto finalizeLogSegment( RpcController controller, FinalizeLogSegmentRequestProto req) throws ServiceException { try { impl.finalizeLogSegment(convert(req.getReqInfo()), req.getStartTxId(), req.getEndTxId()); } catch (IOException e) { throw new ServiceException(e); } return FinalizeLogSegmentResponseProto.newBuilder().build(); } @Override public PurgeLogsResponseProto purgeLogs(RpcController controller, PurgeLogsRequestProto req) throws ServiceException { try { impl.purgeLogsOlderThan(convert(req.getReqInfo()), req.getMinTxIdToKeep()); } catch (IOException e) { throw new ServiceException(e); } return PurgeLogsResponseProto.getDefaultInstance(); } @Override public GetEditLogManifestResponseProto getEditLogManifest( RpcController controller, GetEditLogManifestRequestProto request) throws ServiceException { try { return impl.getEditLogManifest( request.getJid().getIdentifier(), request.getSinceTxId(), request.getInProgressOk()); } catch (IOException e) { throw new ServiceException(e); } } @Override public PrepareRecoveryResponseProto prepareRecovery(RpcController controller, PrepareRecoveryRequestProto request) throws ServiceException { try { return impl.prepareRecovery(convert(request.getReqInfo()), request.getSegmentTxId()); } catch (IOException e) { throw new ServiceException(e); } } @Override public AcceptRecoveryResponseProto acceptRecovery(RpcController controller, AcceptRecoveryRequestProto request) throws ServiceException { try { impl.acceptRecovery(convert(request.getReqInfo()), request.getStateToAccept(), new URL(request.getFromURL())); return AcceptRecoveryResponseProto.getDefaultInstance(); } catch (IOException e) { throw new ServiceException(e); } } private RequestInfo convert( QJournalProtocolProtos.RequestInfoProto reqInfo) { return new RequestInfo( reqInfo.getJournalId().getIdentifier(), reqInfo.getEpoch(), reqInfo.getIpcSerialNumber(), reqInfo.hasCommittedTxId() ? 
reqInfo.getCommittedTxId() : HdfsServerConstants.INVALID_TXID); } @Override public DiscardSegmentsResponseProto discardSegments( RpcController controller, DiscardSegmentsRequestProto request) throws ServiceException { try { impl.discardSegments(convert(request.getJid()), request.getStartTxId()); return DiscardSegmentsResponseProto.getDefaultInstance(); } catch (IOException e) { throw new ServiceException(e); } } @Override public DoPreUpgradeResponseProto doPreUpgrade(RpcController controller, DoPreUpgradeRequestProto request) throws ServiceException { try { impl.doPreUpgrade(convert(request.getJid())); return DoPreUpgradeResponseProto.getDefaultInstance(); } catch (IOException e) { throw new ServiceException(e); } } @Override public DoUpgradeResponseProto doUpgrade(RpcController controller, DoUpgradeRequestProto request) throws ServiceException { StorageInfo si = PBHelper.convert(request.getSInfo(), NodeType.JOURNAL_NODE); try { impl.doUpgrade(convert(request.getJid()), si); return DoUpgradeResponseProto.getDefaultInstance(); } catch (IOException e) { throw new ServiceException(e); } } @Override public DoFinalizeResponseProto doFinalize(RpcController controller, DoFinalizeRequestProto request) throws ServiceException { try { impl.doFinalize(convert(request.getJid())); return DoFinalizeResponseProto.getDefaultInstance(); } catch (IOException e) { throw new ServiceException(e); } } @Override public CanRollBackResponseProto canRollBack(RpcController controller, CanRollBackRequestProto request) throws ServiceException { try { StorageInfo si = PBHelper.convert(request.getStorage(), NodeType.JOURNAL_NODE); Boolean result = impl.canRollBack(convert(request.getJid()), si, PBHelper.convert(request.getPrevStorage(), NodeType.JOURNAL_NODE), request.getTargetLayoutVersion()); return CanRollBackResponseProto.newBuilder() .setCanRollBack(result) .build(); } catch (IOException e) { throw new ServiceException(e); } } @Override public DoRollbackResponseProto doRollback(RpcController controller, DoRollbackRequestProto request) throws ServiceException { try { impl.doRollback(convert(request.getJid())); return DoRollbackResponseProto.getDefaultInstance(); } catch (IOException e) { throw new ServiceException(e); } } @Override public GetJournalCTimeResponseProto getJournalCTime(RpcController controller, GetJournalCTimeRequestProto request) throws ServiceException { try { Long resultCTime = impl.getJournalCTime(convert(request.getJid())); return GetJournalCTimeResponseProto.newBuilder() .setResultCTime(resultCTime) .build(); } catch (IOException e) { throw new ServiceException(e); } } }
14,503
39.741573
103
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.protocolPB; import java.io.Closeable; import java.io.IOException; import java.net.URL; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto; import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.JournalProtocol; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtocolMetaInterface; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RpcClientUtil; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; /** * This class is the client side translator to translate the requests made on * {@link JournalProtocol} interfaces to the RPC server implementing * {@link JournalProtocolPB}. */ @InterfaceAudience.Private @InterfaceStability.Stable public class QJournalProtocolTranslatorPB implements ProtocolMetaInterface, QJournalProtocol, Closeable { /** RpcController is not used and hence is set to null */ private final static RpcController NULL_CONTROLLER = null; private final QJournalProtocolPB rpcProxy; public QJournalProtocolTranslatorPB(QJournalProtocolPB rpcProxy) { this.rpcProxy = rpcProxy; } @Override public void close() { RPC.stopProxy(rpcProxy); } @Override public boolean isFormatted(String journalId) throws IOException { try { IsFormattedRequestProto req = IsFormattedRequestProto.newBuilder() .setJid(convertJournalId(journalId)) .build(); IsFormattedResponseProto resp = rpcProxy.isFormatted( NULL_CONTROLLER, req); return resp.getIsFormatted(); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public GetJournalStateResponseProto getJournalState(String jid) throws IOException { try { GetJournalStateRequestProto req = GetJournalStateRequestProto.newBuilder() .setJid(convertJournalId(jid)) .build(); return rpcProxy.getJournalState(NULL_CONTROLLER, req); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } private JournalIdProto convertJournalId(String jid) { return JournalIdProto.newBuilder() .setIdentifier(jid) .build(); } @Override public void format(String jid, NamespaceInfo nsInfo) throws IOException { try { FormatRequestProto req = FormatRequestProto.newBuilder() .setJid(convertJournalId(jid)) .setNsInfo(PBHelper.convert(nsInfo)) .build(); rpcProxy.format(NULL_CONTROLLER, req); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public NewEpochResponseProto newEpoch(String jid, NamespaceInfo nsInfo, long epoch) throws IOException { try { NewEpochRequestProto req = NewEpochRequestProto.newBuilder() .setJid(convertJournalId(jid)) .setNsInfo(PBHelper.convert(nsInfo)) .setEpoch(epoch) .build(); return rpcProxy.newEpoch(NULL_CONTROLLER, req); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public void journal(RequestInfo reqInfo, long segmentTxId, long firstTxnId, int numTxns, byte[] records) throws IOException { JournalRequestProto req = JournalRequestProto.newBuilder() .setReqInfo(convert(reqInfo)) 
.setSegmentTxnId(segmentTxId) .setFirstTxnId(firstTxnId) .setNumTxns(numTxns) .setRecords(PBHelper.getByteString(records)) .build(); try { rpcProxy.journal(NULL_CONTROLLER, req); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public void heartbeat(RequestInfo reqInfo) throws IOException { try { rpcProxy.heartbeat(NULL_CONTROLLER, HeartbeatRequestProto.newBuilder() .setReqInfo(convert(reqInfo)) .build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } private QJournalProtocolProtos.RequestInfoProto convert( RequestInfo reqInfo) { RequestInfoProto.Builder builder = RequestInfoProto.newBuilder() .setJournalId(convertJournalId(reqInfo.getJournalId())) .setEpoch(reqInfo.getEpoch()) .setIpcSerialNumber(reqInfo.getIpcSerialNumber()); if (reqInfo.hasCommittedTxId()) { builder.setCommittedTxId(reqInfo.getCommittedTxId()); } return builder.build(); } @Override public void startLogSegment(RequestInfo reqInfo, long txid, int layoutVersion) throws IOException { StartLogSegmentRequestProto req = StartLogSegmentRequestProto.newBuilder() .setReqInfo(convert(reqInfo)) .setTxid(txid).setLayoutVersion(layoutVersion) .build(); try { rpcProxy.startLogSegment(NULL_CONTROLLER, req); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public void finalizeLogSegment(RequestInfo reqInfo, long startTxId, long endTxId) throws IOException { FinalizeLogSegmentRequestProto req = FinalizeLogSegmentRequestProto.newBuilder() .setReqInfo(convert(reqInfo)) .setStartTxId(startTxId) .setEndTxId(endTxId) .build(); try { rpcProxy.finalizeLogSegment(NULL_CONTROLLER, req); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public void purgeLogsOlderThan(RequestInfo reqInfo, long minTxIdToKeep) throws IOException { PurgeLogsRequestProto req = PurgeLogsRequestProto.newBuilder() .setReqInfo(convert(reqInfo)) .setMinTxIdToKeep(minTxIdToKeep) .build(); try { rpcProxy.purgeLogs(NULL_CONTROLLER, req); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public GetEditLogManifestResponseProto getEditLogManifest(String jid, long sinceTxId, boolean inProgressOk) throws IOException { try { return rpcProxy.getEditLogManifest(NULL_CONTROLLER, GetEditLogManifestRequestProto.newBuilder() .setJid(convertJournalId(jid)) .setSinceTxId(sinceTxId) .setInProgressOk(inProgressOk) .build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public PrepareRecoveryResponseProto prepareRecovery(RequestInfo reqInfo, long segmentTxId) throws IOException { try { return rpcProxy.prepareRecovery(NULL_CONTROLLER, PrepareRecoveryRequestProto.newBuilder() .setReqInfo(convert(reqInfo)) .setSegmentTxId(segmentTxId) .build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public void acceptRecovery(RequestInfo reqInfo, SegmentStateProto stateToAccept, URL fromUrl) throws IOException { try { rpcProxy.acceptRecovery(NULL_CONTROLLER, AcceptRecoveryRequestProto.newBuilder() .setReqInfo(convert(reqInfo)) .setStateToAccept(stateToAccept) .setFromURL(fromUrl.toExternalForm()) .build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } public boolean isMethodSupported(String methodName) throws IOException { return RpcClientUtil.isMethodSupported(rpcProxy, QJournalProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER, RPC.getProtocolVersion(QJournalProtocolPB.class), methodName); } 
@Override public void doPreUpgrade(String jid) throws IOException { try { rpcProxy.doPreUpgrade(NULL_CONTROLLER, DoPreUpgradeRequestProto.newBuilder() .setJid(convertJournalId(jid)) .build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public void doUpgrade(String journalId, StorageInfo sInfo) throws IOException { try { rpcProxy.doUpgrade(NULL_CONTROLLER, DoUpgradeRequestProto.newBuilder() .setJid(convertJournalId(journalId)) .setSInfo(PBHelper.convert(sInfo)) .build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public void doFinalize(String jid) throws IOException { try { rpcProxy.doFinalize(NULL_CONTROLLER, DoFinalizeRequestProto.newBuilder() .setJid(convertJournalId(jid)) .build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public Boolean canRollBack(String journalId, StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion) throws IOException { try { CanRollBackResponseProto response = rpcProxy.canRollBack( NULL_CONTROLLER, CanRollBackRequestProto.newBuilder() .setJid(convertJournalId(journalId)) .setStorage(PBHelper.convert(storage)) .setPrevStorage(PBHelper.convert(prevStorage)) .setTargetLayoutVersion(targetLayoutVersion) .build()); return response.getCanRollBack(); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public void doRollback(String journalId) throws IOException { try { rpcProxy.doRollback(NULL_CONTROLLER, DoRollbackRequestProto.newBuilder() .setJid(convertJournalId(journalId)) .build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public Long getJournalCTime(String journalId) throws IOException { try { GetJournalCTimeResponseProto response = rpcProxy.getJournalCTime( NULL_CONTROLLER, GetJournalCTimeRequestProto.newBuilder() .setJid(convertJournalId(journalId)) .build()); return response.getResultCTime(); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override public void discardSegments(String journalId, long startTxId) throws IOException { try { rpcProxy.discardSegments(NULL_CONTROLLER, DiscardSegmentsRequestProto.newBuilder() .setJid(convertJournalId(journalId)).setStartTxId(startTxId) .build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } }
14,664
37.090909
103
java
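The two translator classes above are mirror images: the server-side one unwraps protobuf requests, delegates to the QJournalProtocol implementation, and wraps any IOException in a ServiceException, while the client-side one builds the protobuf request, calls the RPC proxy, and converts a ServiceException back into an IOException via ProtobufHelper.getRemoteException. The sketch below shows just that wrapping pattern; JournalService, JournalServiceRpc, the local ServiceException class, and the class name are all hypothetical stand-ins, kept JDK-only so the example is self-contained without protobuf-generated stubs.

import java.io.IOException;

public class TranslatorPatternSketch {

  // Stand-in for com.google.protobuf.ServiceException.
  static class ServiceException extends Exception {
    ServiceException(Throwable cause) { super(cause); }
  }

  interface JournalService {                    // what the application implements
    boolean isFormatted(String journalId) throws IOException;
  }

  interface JournalServiceRpc {                 // what travels over the RPC layer
    boolean isFormatted(String journalId) throws ServiceException;
  }

  // Server-side translator: delegate to the implementation, wrap IOException.
  static JournalServiceRpc serverTranslator(JournalService impl) {
    return journalId -> {
      try {
        return impl.isFormatted(journalId);
      } catch (IOException ioe) {
        throw new ServiceException(ioe);
      }
    };
  }

  // Client-side translator: call the proxy, unwrap ServiceException back to IOException.
  static JournalService clientTranslator(JournalServiceRpc proxy) {
    return journalId -> {
      try {
        return proxy.isFormatted(journalId);
      } catch (ServiceException e) {
        throw (e.getCause() instanceof IOException)
            ? (IOException) e.getCause() : new IOException(e);
      }
    };
  }

  public static void main(String[] args) throws IOException {
    JournalService impl = journalId -> true;    // trivial in-memory "JournalNode"
    JournalService roundTrip = clientTranslator(serverTranslator(impl));
    System.out.println("isFormatted(demo-journal) = " + roundTrip.isFormatted("demo-journal"));
  }
}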
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolPB.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.protocolPB; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.QJournalProtocolService; import org.apache.hadoop.ipc.ProtocolInfo; import org.apache.hadoop.security.KerberosInfo; /** * Protocol used to journal edits to a JournalNode participating * in the quorum journal. * Note: This extends the protocolbuffer service based interface to * add annotations required for security. */ @KerberosInfo( serverPrincipal = DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, clientPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY) @ProtocolInfo(protocolName = "org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol", protocolVersion = 1) @InterfaceAudience.Private public interface QJournalProtocolPB extends QJournalProtocolService.BlockingInterface { }
1,766
41.071429
95
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.client; import java.io.IOException; import java.net.InetSocketAddress; import java.net.URI; import java.net.URL; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.PriorityQueue; import java.util.concurrent.TimeoutException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream; import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; import org.apache.hadoop.hdfs.server.namenode.JournalManager; import org.apache.hadoop.hdfs.server.namenode.JournalSet; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.StringUtils; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.protobuf.TextFormat; /** * A JournalManager that writes to a set of remote JournalNodes, * requiring a quorum of nodes to ack each write. */ @InterfaceAudience.Private public class QuorumJournalManager implements JournalManager { static final Log LOG = LogFactory.getLog(QuorumJournalManager.class); // Timeouts for which the QJM will wait for each of the following actions. 
private final int startSegmentTimeoutMs; private final int prepareRecoveryTimeoutMs; private final int acceptRecoveryTimeoutMs; private final int finalizeSegmentTimeoutMs; private final int selectInputStreamsTimeoutMs; private final int getJournalStateTimeoutMs; private final int newEpochTimeoutMs; private final int writeTxnsTimeoutMs; // Since these don't occur during normal operation, we can // use rather lengthy timeouts, and don't need to make them // configurable. private static final int FORMAT_TIMEOUT_MS = 60000; private static final int HASDATA_TIMEOUT_MS = 60000; private static final int CAN_ROLL_BACK_TIMEOUT_MS = 60000; private static final int FINALIZE_TIMEOUT_MS = 60000; private static final int PRE_UPGRADE_TIMEOUT_MS = 60000; private static final int ROLL_BACK_TIMEOUT_MS = 60000; private static final int UPGRADE_TIMEOUT_MS = 60000; private static final int GET_JOURNAL_CTIME_TIMEOUT_MS = 60000; private static final int DISCARD_SEGMENTS_TIMEOUT_MS = 60000; private final Configuration conf; private final URI uri; private final NamespaceInfo nsInfo; private boolean isActiveWriter; private final AsyncLoggerSet loggers; private int outputBufferCapacity = 512 * 1024; private final URLConnectionFactory connectionFactory; public QuorumJournalManager(Configuration conf, URI uri, NamespaceInfo nsInfo) throws IOException { this(conf, uri, nsInfo, IPCLoggerChannel.FACTORY); } QuorumJournalManager(Configuration conf, URI uri, NamespaceInfo nsInfo, AsyncLogger.Factory loggerFactory) throws IOException { Preconditions.checkArgument(conf != null, "must be configured"); this.conf = conf; this.uri = uri; this.nsInfo = nsInfo; this.loggers = new AsyncLoggerSet(createLoggers(loggerFactory)); this.connectionFactory = URLConnectionFactory .newDefaultURLConnectionFactory(conf); // Configure timeouts. 
this.startSegmentTimeoutMs = conf.getInt( DFSConfigKeys.DFS_QJOURNAL_START_SEGMENT_TIMEOUT_KEY, DFSConfigKeys.DFS_QJOURNAL_START_SEGMENT_TIMEOUT_DEFAULT); this.prepareRecoveryTimeoutMs = conf.getInt( DFSConfigKeys.DFS_QJOURNAL_PREPARE_RECOVERY_TIMEOUT_KEY, DFSConfigKeys.DFS_QJOURNAL_PREPARE_RECOVERY_TIMEOUT_DEFAULT); this.acceptRecoveryTimeoutMs = conf.getInt( DFSConfigKeys.DFS_QJOURNAL_ACCEPT_RECOVERY_TIMEOUT_KEY, DFSConfigKeys.DFS_QJOURNAL_ACCEPT_RECOVERY_TIMEOUT_DEFAULT); this.finalizeSegmentTimeoutMs = conf.getInt( DFSConfigKeys.DFS_QJOURNAL_FINALIZE_SEGMENT_TIMEOUT_KEY, DFSConfigKeys.DFS_QJOURNAL_FINALIZE_SEGMENT_TIMEOUT_DEFAULT); this.selectInputStreamsTimeoutMs = conf.getInt( DFSConfigKeys.DFS_QJOURNAL_SELECT_INPUT_STREAMS_TIMEOUT_KEY, DFSConfigKeys.DFS_QJOURNAL_SELECT_INPUT_STREAMS_TIMEOUT_DEFAULT); this.getJournalStateTimeoutMs = conf.getInt( DFSConfigKeys.DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_KEY, DFSConfigKeys.DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_DEFAULT); this.newEpochTimeoutMs = conf.getInt( DFSConfigKeys.DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_KEY, DFSConfigKeys.DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_DEFAULT); this.writeTxnsTimeoutMs = conf.getInt( DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY, DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT); } protected List<AsyncLogger> createLoggers( AsyncLogger.Factory factory) throws IOException { return createLoggers(conf, uri, nsInfo, factory); } static String parseJournalId(URI uri) { String path = uri.getPath(); Preconditions.checkArgument(path != null && !path.isEmpty(), "Bad URI '%s': must identify journal in path component", uri); String journalId = path.substring(1); checkJournalId(journalId); return journalId; } public static void checkJournalId(String jid) { Preconditions.checkArgument(jid != null && !jid.isEmpty() && !jid.contains("/") && !jid.startsWith("."), "bad journal id: " + jid); } /** * Fence any previous writers, and obtain a unique epoch number * for write-access to the journal nodes. 
* * @return the new, unique epoch number */ Map<AsyncLogger, NewEpochResponseProto> createNewUniqueEpoch() throws IOException { Preconditions.checkState(!loggers.isEpochEstablished(), "epoch already created"); Map<AsyncLogger, GetJournalStateResponseProto> lastPromises = loggers.waitForWriteQuorum(loggers.getJournalState(), getJournalStateTimeoutMs, "getJournalState()"); long maxPromised = Long.MIN_VALUE; for (GetJournalStateResponseProto resp : lastPromises.values()) { maxPromised = Math.max(maxPromised, resp.getLastPromisedEpoch()); } assert maxPromised >= 0; long myEpoch = maxPromised + 1; Map<AsyncLogger, NewEpochResponseProto> resps = loggers.waitForWriteQuorum(loggers.newEpoch(nsInfo, myEpoch), newEpochTimeoutMs, "newEpoch(" + myEpoch + ")"); loggers.setEpoch(myEpoch); return resps; } @Override public void format(NamespaceInfo nsInfo) throws IOException { QuorumCall<AsyncLogger,Void> call = loggers.format(nsInfo); try { call.waitFor(loggers.size(), loggers.size(), 0, FORMAT_TIMEOUT_MS, "format"); } catch (InterruptedException e) { throw new IOException("Interrupted waiting for format() response"); } catch (TimeoutException e) { throw new IOException("Timed out waiting for format() response"); } if (call.countExceptions() > 0) { call.rethrowException("Could not format one or more JournalNodes"); } } @Override public boolean hasSomeData() throws IOException { QuorumCall<AsyncLogger, Boolean> call = loggers.isFormatted(); try { call.waitFor(loggers.size(), 0, 0, HASDATA_TIMEOUT_MS, "hasSomeData"); } catch (InterruptedException e) { throw new IOException("Interrupted while determining if JNs have data"); } catch (TimeoutException e) { throw new IOException("Timed out waiting for response from loggers"); } if (call.countExceptions() > 0) { call.rethrowException( "Unable to check if JNs are ready for formatting"); } // If any of the loggers returned with a non-empty manifest, then // we should prompt for format. for (Boolean hasData : call.getResults().values()) { if (hasData) { return true; } } // Otherwise, none were formatted, we can safely format. return false; } /** * Run recovery/synchronization for a specific segment. * Postconditions: * <ul> * <li>This segment will be finalized on a majority * of nodes.</li> * <li>All nodes which contain the finalized segment will * agree on the length.</li> * </ul> * * @param segmentTxId the starting txid of the segment * @throws IOException */ private void recoverUnclosedSegment(long segmentTxId) throws IOException { Preconditions.checkArgument(segmentTxId > 0); LOG.info("Beginning recovery of unclosed segment starting at txid " + segmentTxId); // Step 1. Prepare recovery QuorumCall<AsyncLogger,PrepareRecoveryResponseProto> prepare = loggers.prepareRecovery(segmentTxId); Map<AsyncLogger, PrepareRecoveryResponseProto> prepareResponses= loggers.waitForWriteQuorum(prepare, prepareRecoveryTimeoutMs, "prepareRecovery(" + segmentTxId + ")"); LOG.info("Recovery prepare phase complete. Responses:\n" + QuorumCall.mapToString(prepareResponses)); // Determine the logger who either: // a) Has already accepted a previous proposal that's higher than any // other // // OR, if no such logger exists: // // b) Has the longest log starting at this transaction ID // TODO: we should collect any "ties" and pass the URL for all of them // when syncing, so we can tolerate failure during recovery better. 
Entry<AsyncLogger, PrepareRecoveryResponseProto> bestEntry = Collections.max( prepareResponses.entrySet(), SegmentRecoveryComparator.INSTANCE); AsyncLogger bestLogger = bestEntry.getKey(); PrepareRecoveryResponseProto bestResponse = bestEntry.getValue(); // Log the above decision, check invariants. if (bestResponse.hasAcceptedInEpoch()) { LOG.info("Using already-accepted recovery for segment " + "starting at txid " + segmentTxId + ": " + bestEntry); } else if (bestResponse.hasSegmentState()) { LOG.info("Using longest log: " + bestEntry); } else { // None of the responses to prepareRecovery() had a segment at the given // txid. This can happen for example in the following situation: // - 3 JNs: JN1, JN2, JN3 // - writer starts segment 101 on JN1, then crashes before // writing to JN2 and JN3 // - during newEpoch(), we saw the segment on JN1 and decide to // recover segment 101 // - before prepare(), JN1 crashes, and we only talk to JN2 and JN3, // neither of which has any entry for this log. // In this case, it is allowed to do nothing for recovery, since the // segment wasn't started on a quorum of nodes. // Sanity check: we should only get here if none of the responses had // a log. This should be a postcondition of the recovery comparator, // but a bug in the comparator might cause us to get here. for (PrepareRecoveryResponseProto resp : prepareResponses.values()) { assert !resp.hasSegmentState() : "One of the loggers had a response, but no best logger " + "was found."; } LOG.info("None of the responders had a log to recover: " + QuorumCall.mapToString(prepareResponses)); return; } SegmentStateProto logToSync = bestResponse.getSegmentState(); assert segmentTxId == logToSync.getStartTxId(); // Sanity check: none of the loggers should be aware of a higher // txid than the txid we intend to truncate to for (Map.Entry<AsyncLogger, PrepareRecoveryResponseProto> e : prepareResponses.entrySet()) { AsyncLogger logger = e.getKey(); PrepareRecoveryResponseProto resp = e.getValue(); if (resp.hasLastCommittedTxId() && resp.getLastCommittedTxId() > logToSync.getEndTxId()) { throw new AssertionError("Decided to synchronize log to " + logToSync + " but logger " + logger + " had seen txid " + resp.getLastCommittedTxId() + " committed"); } } URL syncFromUrl = bestLogger.buildURLToFetchLogs(segmentTxId); QuorumCall<AsyncLogger,Void> accept = loggers.acceptRecovery(logToSync, syncFromUrl); loggers.waitForWriteQuorum(accept, acceptRecoveryTimeoutMs, "acceptRecovery(" + TextFormat.shortDebugString(logToSync) + ")"); // If one of the loggers above missed the synchronization step above, but // we send a finalize() here, that's OK. It validates the log before // finalizing. Hence, even if it is not "in sync", it won't incorrectly // finalize. 
QuorumCall<AsyncLogger, Void> finalize = loggers.finalizeLogSegment(logToSync.getStartTxId(), logToSync.getEndTxId()); loggers.waitForWriteQuorum(finalize, finalizeSegmentTimeoutMs, String.format("finalizeLogSegment(%s-%s)", logToSync.getStartTxId(), logToSync.getEndTxId())); } static List<AsyncLogger> createLoggers(Configuration conf, URI uri, NamespaceInfo nsInfo, AsyncLogger.Factory factory) throws IOException { List<AsyncLogger> ret = Lists.newArrayList(); List<InetSocketAddress> addrs = getLoggerAddresses(uri); String jid = parseJournalId(uri); for (InetSocketAddress addr : addrs) { ret.add(factory.createLogger(conf, nsInfo, jid, addr)); } return ret; } private static List<InetSocketAddress> getLoggerAddresses(URI uri) throws IOException { String authority = uri.getAuthority(); Preconditions.checkArgument(authority != null && !authority.isEmpty(), "URI has no authority: " + uri); String[] parts = StringUtils.split(authority, ';'); for (int i = 0; i < parts.length; i++) { parts[i] = parts[i].trim(); } if (parts.length % 2 == 0) { LOG.warn("Quorum journal URI '" + uri + "' has an even number " + "of Journal Nodes specified. This is not recommended!"); } List<InetSocketAddress> addrs = Lists.newArrayList(); for (String addr : parts) { addrs.add(NetUtils.createSocketAddr( addr, DFSConfigKeys.DFS_JOURNALNODE_RPC_PORT_DEFAULT)); } return addrs; } @Override public EditLogOutputStream startLogSegment(long txId, int layoutVersion) throws IOException { Preconditions.checkState(isActiveWriter, "must recover segments before starting a new one"); QuorumCall<AsyncLogger, Void> q = loggers.startLogSegment(txId, layoutVersion); loggers.waitForWriteQuorum(q, startSegmentTimeoutMs, "startLogSegment(" + txId + ")"); return new QuorumOutputStream(loggers, txId, outputBufferCapacity, writeTxnsTimeoutMs); } @Override public void finalizeLogSegment(long firstTxId, long lastTxId) throws IOException { QuorumCall<AsyncLogger,Void> q = loggers.finalizeLogSegment( firstTxId, lastTxId); loggers.waitForWriteQuorum(q, finalizeSegmentTimeoutMs, String.format("finalizeLogSegment(%s-%s)", firstTxId, lastTxId)); } @Override public void setOutputBufferCapacity(int size) { outputBufferCapacity = size; } @Override public void purgeLogsOlderThan(long minTxIdToKeep) throws IOException { // This purges asynchronously -- there's no need to wait for a quorum // here, because it's always OK to fail. LOG.info("Purging remote journals older than txid " + minTxIdToKeep); loggers.purgeLogsOlderThan(minTxIdToKeep); } @Override public void recoverUnfinalizedSegments() throws IOException { Preconditions.checkState(!isActiveWriter, "already active writer"); LOG.info("Starting recovery process for unclosed journal segments..."); Map<AsyncLogger, NewEpochResponseProto> resps = createNewUniqueEpoch(); LOG.info("Successfully started new epoch " + loggers.getEpoch()); if (LOG.isDebugEnabled()) { LOG.debug("newEpoch(" + loggers.getEpoch() + ") responses:\n" + QuorumCall.mapToString(resps)); } long mostRecentSegmentTxId = Long.MIN_VALUE; for (NewEpochResponseProto r : resps.values()) { if (r.hasLastSegmentTxId()) { mostRecentSegmentTxId = Math.max(mostRecentSegmentTxId, r.getLastSegmentTxId()); } } // On a completely fresh system, none of the journals have any // segments, so there's nothing to recover. 
if (mostRecentSegmentTxId != Long.MIN_VALUE) { recoverUnclosedSegment(mostRecentSegmentTxId); } isActiveWriter = true; } @Override public void close() throws IOException { loggers.close(); } @Override public void selectInputStreams(Collection<EditLogInputStream> streams, long fromTxnId, boolean inProgressOk) throws IOException { QuorumCall<AsyncLogger, RemoteEditLogManifest> q = loggers.getEditLogManifest(fromTxnId, inProgressOk); Map<AsyncLogger, RemoteEditLogManifest> resps = loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs, "selectInputStreams"); LOG.debug("selectInputStream manifests:\n" + Joiner.on("\n").withKeyValueSeparator(": ").join(resps)); final PriorityQueue<EditLogInputStream> allStreams = new PriorityQueue<EditLogInputStream>(64, JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR); for (Map.Entry<AsyncLogger, RemoteEditLogManifest> e : resps.entrySet()) { AsyncLogger logger = e.getKey(); RemoteEditLogManifest manifest = e.getValue(); for (RemoteEditLog remoteLog : manifest.getLogs()) { URL url = logger.buildURLToFetchLogs(remoteLog.getStartTxId()); EditLogInputStream elis = EditLogFileInputStream.fromUrl( connectionFactory, url, remoteLog.getStartTxId(), remoteLog.getEndTxId(), remoteLog.isInProgress()); allStreams.add(elis); } } JournalSet.chainAndMakeRedundantStreams(streams, allStreams, fromTxnId); } @Override public String toString() { return "QJM to " + loggers; } @VisibleForTesting AsyncLoggerSet getLoggerSetForTests() { return loggers; } @Override public void doPreUpgrade() throws IOException { QuorumCall<AsyncLogger, Void> call = loggers.doPreUpgrade(); try { call.waitFor(loggers.size(), loggers.size(), 0, PRE_UPGRADE_TIMEOUT_MS, "doPreUpgrade"); if (call.countExceptions() > 0) { call.rethrowException("Could not do pre-upgrade of one or more JournalNodes"); } } catch (InterruptedException e) { throw new IOException("Interrupted waiting for doPreUpgrade() response"); } catch (TimeoutException e) { throw new IOException("Timed out waiting for doPreUpgrade() response"); } } @Override public void doUpgrade(Storage storage) throws IOException { QuorumCall<AsyncLogger, Void> call = loggers.doUpgrade(storage); try { call.waitFor(loggers.size(), loggers.size(), 0, UPGRADE_TIMEOUT_MS, "doUpgrade"); if (call.countExceptions() > 0) { call.rethrowException("Could not perform upgrade of one or more JournalNodes"); } } catch (InterruptedException e) { throw new IOException("Interrupted waiting for doUpgrade() response"); } catch (TimeoutException e) { throw new IOException("Timed out waiting for doUpgrade() response"); } } @Override public void doFinalize() throws IOException { QuorumCall<AsyncLogger, Void> call = loggers.doFinalize(); try { call.waitFor(loggers.size(), loggers.size(), 0, FINALIZE_TIMEOUT_MS, "doFinalize"); if (call.countExceptions() > 0) { call.rethrowException("Could not finalize one or more JournalNodes"); } } catch (InterruptedException e) { throw new IOException("Interrupted waiting for doFinalize() response"); } catch (TimeoutException e) { throw new IOException("Timed out waiting for doFinalize() response"); } } @Override public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion) throws IOException { QuorumCall<AsyncLogger, Boolean> call = loggers.canRollBack(storage, prevStorage, targetLayoutVersion); try { call.waitFor(loggers.size(), loggers.size(), 0, CAN_ROLL_BACK_TIMEOUT_MS, "lockSharedStorage"); if (call.countExceptions() > 0) { call.rethrowException("Could not check if roll back possible for" + " one or more 
JournalNodes"); } // Either they all return the same thing or this call fails, so we can // just return the first result. try { DFSUtil.assertAllResultsEqual(call.getResults().values()); } catch (AssertionError ae) { throw new IOException("Results differed for canRollBack", ae); } for (Boolean result : call.getResults().values()) { return result; } } catch (InterruptedException e) { throw new IOException("Interrupted waiting for lockSharedStorage() " + "response"); } catch (TimeoutException e) { throw new IOException("Timed out waiting for lockSharedStorage() " + "response"); } throw new AssertionError("Unreachable code."); } @Override public void doRollback() throws IOException { QuorumCall<AsyncLogger, Void> call = loggers.doRollback(); try { call.waitFor(loggers.size(), loggers.size(), 0, ROLL_BACK_TIMEOUT_MS, "doRollback"); if (call.countExceptions() > 0) { call.rethrowException("Could not perform rollback of one or more JournalNodes"); } } catch (InterruptedException e) { throw new IOException("Interrupted waiting for doFinalize() response"); } catch (TimeoutException e) { throw new IOException("Timed out waiting for doFinalize() response"); } } @Override public long getJournalCTime() throws IOException { QuorumCall<AsyncLogger, Long> call = loggers.getJournalCTime(); try { call.waitFor(loggers.size(), loggers.size(), 0, GET_JOURNAL_CTIME_TIMEOUT_MS, "getJournalCTime"); if (call.countExceptions() > 0) { call.rethrowException("Could not journal CTime for one " + "more JournalNodes"); } // Either they all return the same thing or this call fails, so we can // just return the first result. try { DFSUtil.assertAllResultsEqual(call.getResults().values()); } catch (AssertionError ae) { throw new IOException("Results differed for getJournalCTime", ae); } for (Long result : call.getResults().values()) { return result; } } catch (InterruptedException e) { throw new IOException("Interrupted waiting for getJournalCTime() " + "response"); } catch (TimeoutException e) { throw new IOException("Timed out waiting for getJournalCTime() " + "response"); } throw new AssertionError("Unreachable code."); } @Override public void discardSegments(long startTxId) throws IOException { QuorumCall<AsyncLogger, Void> call = loggers.discardSegments(startTxId); try { call.waitFor(loggers.size(), loggers.size(), 0, DISCARD_SEGMENTS_TIMEOUT_MS, "discardSegments"); if (call.countExceptions() > 0) { call.rethrowException( "Could not perform discardSegments of one or more JournalNodes"); } } catch (InterruptedException e) { throw new IOException( "Interrupted waiting for discardSegments() response"); } catch (TimeoutException e) { throw new IOException( "Timed out waiting for discardSegments() response"); } } }
25,642
37.73565
100
java
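The epoch negotiation in createNewUniqueEpoch() above boils down to two small rules: a write quorum is a simple majority of the configured JournalNodes, and the writer claims max(lastPromisedEpoch) + 1 over the quorum's responses. A minimal, self-contained sketch of those rules (hypothetical class and method names, not the Hadoop implementation):

import java.util.Arrays;
import java.util.List;

public class EpochSketch {
  // Majority size for a set of N loggers, e.g. 2 of 3 or 3 of 5.
  static int majority(int numLoggers) {
    return numLoggers / 2 + 1;
  }

  // Next epoch to claim, given the promised epochs returned by a quorum.
  static long nextEpoch(List<Long> promisedEpochs) {
    long maxPromised = 0;
    for (long e : promisedEpochs) {
      maxPromised = Math.max(maxPromised, e);
    }
    return maxPromised + 1;
  }

  public static void main(String[] args) {
    System.out.println(majority(3));                      // 2: two of three JNs form a write quorum
    System.out.println(nextEpoch(Arrays.asList(4L, 6L))); // 7: one past the highest promise seen
  }
}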
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/LoggerTooFarBehindException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.client; import java.io.IOException; class LoggerTooFarBehindException extends IOException { private static final long serialVersionUID = 1L; }
995
37.307692
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.client; import java.io.IOException; import java.net.InetSocketAddress; import java.net.MalformedURLException; import java.net.URI; import java.net.URL; import java.security.PrivilegedExceptionAction; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo; import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolPB; import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolTranslatorPB; import org.apache.hadoop.hdfs.qjournal.server.GetJournalEditServlet; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.StopWatch; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.net.InetAddresses; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.common.util.concurrent.UncaughtExceptionHandlers; /** * Channel to a remote JournalNode using Hadoop IPC. 
* All of the calls are run on a separate thread, and return * {@link ListenableFuture} instances to wait for their result. * This allows calls to be bound together using the {@link QuorumCall} * class. */ @InterfaceAudience.Private public class IPCLoggerChannel implements AsyncLogger { private final Configuration conf; protected final InetSocketAddress addr; private QJournalProtocol proxy; /** * Executes tasks submitted to it serially, on a single thread, in FIFO order * (generally used for write tasks that should not be reordered). */ private final ListeningExecutorService singleThreadExecutor; /** * Executes tasks submitted to it in parallel with each other and with those * submitted to singleThreadExecutor (generally used for read tasks that can * be safely reordered and interleaved with writes). */ private final ListeningExecutorService parallelExecutor; private long ipcSerial = 0; private long epoch = -1; private long committedTxId = HdfsServerConstants.INVALID_TXID; private final String journalId; private final NamespaceInfo nsInfo; private URL httpServerURL; private final IPCLoggerChannelMetrics metrics; /** * The number of bytes of edits data still in the queue. */ private int queuedEditsSizeBytes = 0; /** * The highest txid that has been successfully logged on the remote JN. */ private long highestAckedTxId = 0; /** * Nanotime of the last time we successfully journaled some edits * to the remote node. */ private long lastAckNanos = 0; /** * Nanotime of the last time that committedTxId was update. Used * to calculate the lag in terms of time, rather than just a number * of txns. */ private long lastCommitNanos = 0; /** * The maximum number of bytes that can be pending in the queue. * This keeps the writer from hitting OOME if one of the loggers * starts responding really slowly. Eventually, the queue * overflows and it starts to treat the logger as having errored. */ private final int queueSizeLimitBytes; /** * If this logger misses some edits, or restarts in the middle of * a segment, the writer won't be able to write any more edits until * the beginning of the next segment. Upon detecting this situation, * the writer sets this flag to true to avoid sending useless RPCs. 
*/ private boolean outOfSync = false; /** * Stopwatch which starts counting on each heartbeat that is sent */ private final StopWatch lastHeartbeatStopwatch = new StopWatch(); private static final long HEARTBEAT_INTERVAL_MILLIS = 1000; private static final long WARN_JOURNAL_MILLIS_THRESHOLD = 1000; static final Factory FACTORY = new AsyncLogger.Factory() { @Override public AsyncLogger createLogger(Configuration conf, NamespaceInfo nsInfo, String journalId, InetSocketAddress addr) { return new IPCLoggerChannel(conf, nsInfo, journalId, addr); } }; public IPCLoggerChannel(Configuration conf, NamespaceInfo nsInfo, String journalId, InetSocketAddress addr) { this.conf = conf; this.nsInfo = nsInfo; this.journalId = journalId; this.addr = addr; this.queueSizeLimitBytes = 1024 * 1024 * conf.getInt( DFSConfigKeys.DFS_QJOURNAL_QUEUE_SIZE_LIMIT_KEY, DFSConfigKeys.DFS_QJOURNAL_QUEUE_SIZE_LIMIT_DEFAULT); singleThreadExecutor = MoreExecutors.listeningDecorator( createSingleThreadExecutor()); parallelExecutor = MoreExecutors.listeningDecorator( createParallelExecutor()); metrics = IPCLoggerChannelMetrics.create(this); } @Override public synchronized void setEpoch(long epoch) { this.epoch = epoch; } @Override public synchronized void setCommittedTxId(long txid) { Preconditions.checkArgument(txid >= committedTxId, "Trying to move committed txid backwards in client " + "old: %s new: %s", committedTxId, txid); this.committedTxId = txid; this.lastCommitNanos = System.nanoTime(); } @Override public void close() { // No more tasks may be submitted after this point. singleThreadExecutor.shutdown(); parallelExecutor.shutdown(); if (proxy != null) { // TODO: this can hang for quite some time if the client // is currently in the middle of a call to a downed JN. // We should instead do this asynchronously, and just stop // making any more calls after this point (eg clear the queue) RPC.stopProxy(proxy); } } protected QJournalProtocol getProxy() throws IOException { if (proxy != null) return proxy; proxy = createProxy(); return proxy; } protected QJournalProtocol createProxy() throws IOException { final Configuration confCopy = new Configuration(conf); // Need to set NODELAY or else batches larger than MTU can trigger // 40ms nagling delays. confCopy.setBoolean( CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY, true); RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class, ProtobufRpcEngine.class); return SecurityUtil.doAsLoginUser( new PrivilegedExceptionAction<QJournalProtocol>() { @Override public QJournalProtocol run() throws IOException { RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class, ProtobufRpcEngine.class); QJournalProtocolPB pbproxy = RPC.getProxy( QJournalProtocolPB.class, RPC.getProtocolVersion(QJournalProtocolPB.class), addr, confCopy); return new QJournalProtocolTranslatorPB(pbproxy); } }); } /** * Separated out for easy overriding in tests. */ @VisibleForTesting protected ExecutorService createSingleThreadExecutor() { return Executors.newSingleThreadExecutor( new ThreadFactoryBuilder() .setDaemon(true) .setNameFormat("Logger channel (from single-thread executor) to " + addr) .setUncaughtExceptionHandler( UncaughtExceptionHandlers.systemExit()) .build()); } /** * Separated out for easy overriding in tests. 
*/ @VisibleForTesting protected ExecutorService createParallelExecutor() { return Executors.newCachedThreadPool( new ThreadFactoryBuilder() .setDaemon(true) .setNameFormat("Logger channel (from parallel executor) to " + addr) .setUncaughtExceptionHandler( UncaughtExceptionHandlers.systemExit()) .build()); } @Override public URL buildURLToFetchLogs(long segmentTxId) { Preconditions.checkArgument(segmentTxId > 0, "Invalid segment: %s", segmentTxId); Preconditions.checkState(hasHttpServerEndPoint(), "No HTTP/HTTPS endpoint"); try { String path = GetJournalEditServlet.buildPath( journalId, segmentTxId, nsInfo); return new URL(httpServerURL, path); } catch (MalformedURLException e) { // should never get here. throw new RuntimeException(e); } } private synchronized RequestInfo createReqInfo() { Preconditions.checkState(epoch > 0, "bad epoch: " + epoch); return new RequestInfo(journalId, epoch, ipcSerial++, committedTxId); } @VisibleForTesting synchronized long getNextIpcSerial() { return ipcSerial; } public synchronized int getQueuedEditsSize() { return queuedEditsSizeBytes; } public InetSocketAddress getRemoteAddress() { return addr; } /** * @return true if the server has gotten out of sync from the client, * and thus a log roll is required for this logger to successfully start * logging more edits. */ public synchronized boolean isOutOfSync() { return outOfSync; } @VisibleForTesting void waitForAllPendingCalls() throws InterruptedException { try { singleThreadExecutor.submit(new Runnable() { @Override public void run() { } }).get(); } catch (ExecutionException e) { // This can't happen! throw new AssertionError(e); } } @Override public ListenableFuture<Boolean> isFormatted() { return singleThreadExecutor.submit(new Callable<Boolean>() { @Override public Boolean call() throws IOException { return getProxy().isFormatted(journalId); } }); } @Override public ListenableFuture<GetJournalStateResponseProto> getJournalState() { return singleThreadExecutor.submit(new Callable<GetJournalStateResponseProto>() { @Override public GetJournalStateResponseProto call() throws IOException { GetJournalStateResponseProto ret = getProxy().getJournalState(journalId); constructHttpServerURI(ret); return ret; } }); } @Override public ListenableFuture<NewEpochResponseProto> newEpoch( final long epoch) { return singleThreadExecutor.submit(new Callable<NewEpochResponseProto>() { @Override public NewEpochResponseProto call() throws IOException { return getProxy().newEpoch(journalId, nsInfo, epoch); } }); } @Override public ListenableFuture<Void> sendEdits( final long segmentTxId, final long firstTxnId, final int numTxns, final byte[] data) { try { reserveQueueSpace(data.length); } catch (LoggerTooFarBehindException e) { return Futures.immediateFailedFuture(e); } // When this batch is acked, we use its submission time in order // to calculate how far we are lagging. final long submitNanos = System.nanoTime(); ListenableFuture<Void> ret = null; try { ret = singleThreadExecutor.submit(new Callable<Void>() { @Override public Void call() throws IOException { throwIfOutOfSync(); long rpcSendTimeNanos = System.nanoTime(); try { getProxy().journal(createReqInfo(), segmentTxId, firstTxnId, numTxns, data); } catch (IOException e) { QuorumJournalManager.LOG.warn( "Remote journal " + IPCLoggerChannel.this + " failed to " + "write txns " + firstTxnId + "-" + (firstTxnId + numTxns - 1) + ". 
Will try to write to this JN again after the next " + "log roll.", e); synchronized (IPCLoggerChannel.this) { outOfSync = true; } throw e; } finally { long now = System.nanoTime(); long rpcTime = TimeUnit.MICROSECONDS.convert( now - rpcSendTimeNanos, TimeUnit.NANOSECONDS); long endToEndTime = TimeUnit.MICROSECONDS.convert( now - submitNanos, TimeUnit.NANOSECONDS); metrics.addWriteEndToEndLatency(endToEndTime); metrics.addWriteRpcLatency(rpcTime); if (rpcTime / 1000 > WARN_JOURNAL_MILLIS_THRESHOLD) { QuorumJournalManager.LOG.warn( "Took " + (rpcTime / 1000) + "ms to send a batch of " + numTxns + " edits (" + data.length + " bytes) to " + "remote journal " + IPCLoggerChannel.this); } } synchronized (IPCLoggerChannel.this) { highestAckedTxId = firstTxnId + numTxns - 1; lastAckNanos = submitNanos; } return null; } }); } finally { if (ret == null) { // it didn't successfully get submitted, // so adjust the queue size back down. unreserveQueueSpace(data.length); } else { // It was submitted to the queue, so adjust the length // once the call completes, regardless of whether it // succeeds or fails. Futures.addCallback(ret, new FutureCallback<Void>() { @Override public void onFailure(Throwable t) { unreserveQueueSpace(data.length); } @Override public void onSuccess(Void t) { unreserveQueueSpace(data.length); } }); } } return ret; } private void throwIfOutOfSync() throws JournalOutOfSyncException, IOException { if (isOutOfSync()) { // Even if we're out of sync, it's useful to send an RPC // to the remote node in order to update its lag metrics, etc. heartbeatIfNecessary(); throw new JournalOutOfSyncException( "Journal disabled until next roll"); } } /** * When we've entered an out-of-sync state, it's still useful to periodically * send an empty RPC to the server, such that it has the up to date * committedTxId. This acts as a sanity check during recovery, and also allows * that node's metrics to be up-to-date about its lag. * * In the future, this method may also be used in order to check that the * current node is still the current writer, even if no edits are being * written. */ private void heartbeatIfNecessary() throws IOException { if (lastHeartbeatStopwatch.now(TimeUnit.MILLISECONDS) > HEARTBEAT_INTERVAL_MILLIS || !lastHeartbeatStopwatch.isRunning()) { try { getProxy().heartbeat(createReqInfo()); } finally { // Don't send heartbeats more often than the configured interval, // even if they fail. 
lastHeartbeatStopwatch.reset().start(); } } } private synchronized void reserveQueueSpace(int size) throws LoggerTooFarBehindException { Preconditions.checkArgument(size >= 0); if (queuedEditsSizeBytes + size > queueSizeLimitBytes && queuedEditsSizeBytes > 0) { throw new LoggerTooFarBehindException(); } queuedEditsSizeBytes += size; } private synchronized void unreserveQueueSpace(int size) { Preconditions.checkArgument(size >= 0); queuedEditsSizeBytes -= size; } @Override public ListenableFuture<Void> format(final NamespaceInfo nsInfo) { return singleThreadExecutor.submit(new Callable<Void>() { @Override public Void call() throws Exception { getProxy().format(journalId, nsInfo); return null; } }); } @Override public ListenableFuture<Void> startLogSegment(final long txid, final int layoutVersion) { return singleThreadExecutor.submit(new Callable<Void>() { @Override public Void call() throws IOException { getProxy().startLogSegment(createReqInfo(), txid, layoutVersion); synchronized (IPCLoggerChannel.this) { if (outOfSync) { outOfSync = false; QuorumJournalManager.LOG.info( "Restarting previously-stopped writes to " + IPCLoggerChannel.this + " in segment starting at txid " + txid); } } return null; } }); } @Override public ListenableFuture<Void> finalizeLogSegment( final long startTxId, final long endTxId) { return singleThreadExecutor.submit(new Callable<Void>() { @Override public Void call() throws IOException { throwIfOutOfSync(); getProxy().finalizeLogSegment(createReqInfo(), startTxId, endTxId); return null; } }); } @Override public ListenableFuture<Void> purgeLogsOlderThan(final long minTxIdToKeep) { return singleThreadExecutor.submit(new Callable<Void>() { @Override public Void call() throws Exception { getProxy().purgeLogsOlderThan(createReqInfo(), minTxIdToKeep); return null; } }); } @Override public ListenableFuture<RemoteEditLogManifest> getEditLogManifest( final long fromTxnId, final boolean inProgressOk) { return parallelExecutor.submit(new Callable<RemoteEditLogManifest>() { @Override public RemoteEditLogManifest call() throws IOException { GetEditLogManifestResponseProto ret = getProxy().getEditLogManifest( journalId, fromTxnId, inProgressOk); // Update the http port, since we need this to build URLs to any of the // returned logs. constructHttpServerURI(ret); return PBHelper.convert(ret.getManifest()); } }); } @Override public ListenableFuture<PrepareRecoveryResponseProto> prepareRecovery( final long segmentTxId) { return singleThreadExecutor.submit(new Callable<PrepareRecoveryResponseProto>() { @Override public PrepareRecoveryResponseProto call() throws IOException { if (!hasHttpServerEndPoint()) { // force an RPC call so we know what the HTTP port should be if it // haven't done so. 
GetJournalStateResponseProto ret = getProxy().getJournalState( journalId); constructHttpServerURI(ret); } return getProxy().prepareRecovery(createReqInfo(), segmentTxId); } }); } @Override public ListenableFuture<Void> acceptRecovery( final SegmentStateProto log, final URL url) { return singleThreadExecutor.submit(new Callable<Void>() { @Override public Void call() throws IOException { getProxy().acceptRecovery(createReqInfo(), log, url); return null; } }); } @Override public ListenableFuture<Void> discardSegments(final long startTxId) { return singleThreadExecutor.submit(new Callable<Void>() { @Override public Void call() throws IOException { getProxy().discardSegments(journalId, startTxId); return null; } }); } @Override public ListenableFuture<Void> doPreUpgrade() { return singleThreadExecutor.submit(new Callable<Void>() { @Override public Void call() throws IOException { getProxy().doPreUpgrade(journalId); return null; } }); } @Override public ListenableFuture<Void> doUpgrade(final StorageInfo sInfo) { return singleThreadExecutor.submit(new Callable<Void>() { @Override public Void call() throws IOException { getProxy().doUpgrade(journalId, sInfo); return null; } }); } @Override public ListenableFuture<Void> doFinalize() { return singleThreadExecutor.submit(new Callable<Void>() { @Override public Void call() throws IOException { getProxy().doFinalize(journalId); return null; } }); } @Override public ListenableFuture<Boolean> canRollBack(final StorageInfo storage, final StorageInfo prevStorage, final int targetLayoutVersion) { return singleThreadExecutor.submit(new Callable<Boolean>() { @Override public Boolean call() throws IOException { return getProxy().canRollBack(journalId, storage, prevStorage, targetLayoutVersion); } }); } @Override public ListenableFuture<Void> doRollback() { return singleThreadExecutor.submit(new Callable<Void>() { @Override public Void call() throws IOException { getProxy().doRollback(journalId); return null; } }); } @Override public ListenableFuture<Long> getJournalCTime() { return singleThreadExecutor.submit(new Callable<Long>() { @Override public Long call() throws IOException { return getProxy().getJournalCTime(journalId); } }); } @Override public String toString() { return InetAddresses.toAddrString(addr.getAddress()) + ':' + addr.getPort(); } @Override public synchronized void appendReport(StringBuilder sb) { sb.append("Written txid ").append(highestAckedTxId); long behind = getLagTxns(); if (behind > 0) { if (lastAckNanos != 0) { long lagMillis = getLagTimeMillis(); sb.append(" (" + behind + " txns/" + lagMillis + "ms behind)"); } else { sb.append(" (never written"); } } if (outOfSync) { sb.append(" (will try to re-sync on next segment)"); } } public synchronized long getLagTxns() { return Math.max(committedTxId - highestAckedTxId, 0); } public synchronized long getLagTimeMillis() { return TimeUnit.MILLISECONDS.convert( Math.max(lastCommitNanos - lastAckNanos, 0), TimeUnit.NANOSECONDS); } private void constructHttpServerURI(GetEditLogManifestResponseProto ret) { if (ret.hasFromURL()) { URI uri = URI.create(ret.getFromURL()); httpServerURL = getHttpServerURI(uri.getScheme(), uri.getPort()); } else { httpServerURL = getHttpServerURI("http", ret.getHttpPort());; } } private void constructHttpServerURI(GetJournalStateResponseProto ret) { if (ret.hasFromURL()) { URI uri = URI.create(ret.getFromURL()); httpServerURL = getHttpServerURI(uri.getScheme(), uri.getPort()); } else { httpServerURL = getHttpServerURI("http", ret.getHttpPort());; } } /** * Construct the 
http server based on the response. * * The fromURL field in the response specifies the endpoint of the http * server. However, the address might not be accurate since the server can * bind to multiple interfaces. Here the client plugs in the address specified * in the configuration and generates the URI. */ private URL getHttpServerURI(String scheme, int port) { try { return new URL(scheme, addr.getHostName(), port, ""); } catch (MalformedURLException e) { // Unreachable throw new RuntimeException(e); } } private boolean hasHttpServerEndPoint() { return httpServerURL != null; } }
25,101
32.648794
103
java
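The queue accounting in reserveQueueSpace()/unreserveQueueSpace() above gives the channel simple back-pressure: a batch is refused only when it would push the pending bytes over queueSizeLimitBytes and something is already queued, so a single oversized batch can still be accepted on an empty queue. A small sketch of that rule (hypothetical class name; the real code throws LoggerTooFarBehindException instead of returning false):

public class EditQueueSketch {
  private final int limitBytes;
  private int queuedBytes = 0;

  public EditQueueSketch(int limitBytes) {
    this.limitBytes = limitBytes;
  }

  // Mirrors reserveQueueSpace(): reject only if over the limit AND non-empty.
  public synchronized boolean tryReserve(int size) {
    if (queuedBytes + size > limitBytes && queuedBytes > 0) {
      return false;
    }
    queuedBytes += size;
    return true;
  }

  // Mirrors unreserveQueueSpace(): called once the send completes either way.
  public synchronized void release(int size) {
    queuedBytes -= size;
  }

  public static void main(String[] args) {
    EditQueueSketch q = new EditQueueSketch(10);
    System.out.println(q.tryReserve(8));  // true: 8 bytes queued
    System.out.println(q.tryReserve(8));  // false: would exceed the 10-byte limit
    q.release(8);
    System.out.println(q.tryReserve(15)); // true: oversized, but the queue was empty
  }
}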
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannelMetrics.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.client; import java.net.InetSocketAddress; import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableQuantiles; import com.google.common.collect.Maps; /** * The metrics for a journal from the writer's perspective. */ @Metrics(about="Journal client metrics", context="dfs") class IPCLoggerChannelMetrics { final MetricsRegistry registry = new MetricsRegistry("NameNode"); private volatile IPCLoggerChannel ch; private final MutableQuantiles[] writeEndToEndLatencyQuantiles; private final MutableQuantiles[] writeRpcLatencyQuantiles; /** * In the case of the NN transitioning between states, edit logs are closed * and reopened. Thus, the IPCLoggerChannel instance that writes to a * given JournalNode may change over the lifetime of the process. * However, metrics2 doesn't have a function to unregister a set of metrics * and fails if a new metrics class is registered with the same name * as the existing one. Hence, we have to maintain our own registry * ("multiton") here, so that we have exactly one metrics instance * per JournalNode, and switch out the pointer to the underlying * IPCLoggerChannel instance. 
*/ private static final Map<String, IPCLoggerChannelMetrics> REGISTRY = Maps.newHashMap(); private IPCLoggerChannelMetrics(IPCLoggerChannel ch) { this.ch = ch; Configuration conf = new HdfsConfiguration(); int[] intervals = conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY); if (intervals != null) { writeEndToEndLatencyQuantiles = new MutableQuantiles[intervals.length]; writeRpcLatencyQuantiles = new MutableQuantiles[intervals.length]; for (int i = 0; i < writeEndToEndLatencyQuantiles.length; i++) { int interval = intervals[i]; writeEndToEndLatencyQuantiles[i] = registry.newQuantiles( "writesE2E" + interval + "s", "End-to-end time for write operations", "ops", "LatencyMicros", interval); writeRpcLatencyQuantiles[i] = registry.newQuantiles( "writesRpc" + interval + "s", "RPC RTT for write operations", "ops", "LatencyMicros", interval); } } else { writeEndToEndLatencyQuantiles = null; writeRpcLatencyQuantiles = null; } } private void setChannel(IPCLoggerChannel ch) { assert ch.getRemoteAddress().equals(this.ch.getRemoteAddress()); this.ch = ch; } static IPCLoggerChannelMetrics create(IPCLoggerChannel ch) { String name = getName(ch); synchronized (REGISTRY) { IPCLoggerChannelMetrics m = REGISTRY.get(name); if (m != null) { m.setChannel(ch); } else { m = new IPCLoggerChannelMetrics(ch); DefaultMetricsSystem.instance().register(name, null, m); REGISTRY.put(name, m); } return m; } } private static String getName(IPCLoggerChannel ch) { InetSocketAddress addr = ch.getRemoteAddress(); String addrStr = addr.getAddress().getHostAddress(); // IPv6 addresses have colons, which aren't allowed as part of // MBean names. Replace with '.' addrStr = addrStr.replace(':', '.'); return "IPCLoggerChannel-" + addrStr + "-" + addr.getPort(); } @Metric("Is the remote logger out of sync with the quorum") public String isOutOfSync() { return Boolean.toString(ch.isOutOfSync()); } @Metric("The number of transactions the remote log is lagging behind the " + "quorum") public long getCurrentLagTxns() { return ch.getLagTxns(); } @Metric("The number of milliseconds the remote log is lagging behind the " + "quorum") public long getLagTimeMillis() { return ch.getLagTimeMillis(); } @Metric("The number of bytes of pending data to be sent to the remote node") public int getQueuedEditsSize() { return ch.getQueuedEditsSize(); } public void addWriteEndToEndLatency(long micros) { if (writeEndToEndLatencyQuantiles != null) { for (MutableQuantiles q : writeEndToEndLatencyQuantiles) { q.add(micros); } } } public void addWriteRpcLatency(long micros) { if (writeRpcLatencyQuantiles != null) { for (MutableQuantiles q : writeRpcLatencyQuantiles) { q.add(micros); } } } }
5,509
34.548387
86
java
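The class comment above describes a per-JournalNode "multiton": exactly one metrics object per remote address, registered with the metrics system once, and only the pointer to the current IPCLoggerChannel swapped when the NameNode reopens its edit logs. A generic sketch of that pattern (hypothetical names, not the Hadoop code):

import java.util.HashMap;
import java.util.Map;

public class MultitonSketch<D> {
  // One instance per key, created lazily and never re-registered.
  private static final Map<String, MultitonSketch<?>> REGISTRY = new HashMap<>();

  private volatile D delegate;

  private MultitonSketch(D delegate) {
    this.delegate = delegate;
  }

  @SuppressWarnings("unchecked")
  public static synchronized <D> MultitonSketch<D> getOrCreate(String key, D delegate) {
    MultitonSketch<D> m = (MultitonSketch<D>) REGISTRY.get(key);
    if (m == null) {
      m = new MultitonSketch<>(delegate); // first time: register under this key
      REGISTRY.put(key, m);
    } else {
      m.delegate = delegate;              // later: just re-point the existing instance
    }
    return m;
  }

  public D delegate() {
    return delegate;
  }
}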
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.client; import java.io.IOException; import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; import org.apache.hadoop.hdfs.server.namenode.EditsDoubleBuffer; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp; import org.apache.hadoop.io.DataOutputBuffer; /** * EditLogOutputStream implementation that writes to a quorum of * remote journals. */ class QuorumOutputStream extends EditLogOutputStream { private final AsyncLoggerSet loggers; private EditsDoubleBuffer buf; private final long segmentTxId; private final int writeTimeoutMs; public QuorumOutputStream(AsyncLoggerSet loggers, long txId, int outputBufferCapacity, int writeTimeoutMs) throws IOException { super(); this.buf = new EditsDoubleBuffer(outputBufferCapacity); this.loggers = loggers; this.segmentTxId = txId; this.writeTimeoutMs = writeTimeoutMs; } @Override public void write(FSEditLogOp op) throws IOException { buf.writeOp(op); } @Override public void writeRaw(byte[] bytes, int offset, int length) throws IOException { buf.writeRaw(bytes, offset, length); } @Override public void create(int layoutVersion) throws IOException { throw new UnsupportedOperationException(); } @Override public void close() throws IOException { if (buf != null) { buf.close(); buf = null; } } @Override public void abort() throws IOException { QuorumJournalManager.LOG.warn("Aborting " + this); buf = null; close(); } @Override public void setReadyToFlush() throws IOException { buf.setReadyToFlush(); } @Override protected void flushAndSync(boolean durable) throws IOException { int numReadyBytes = buf.countReadyBytes(); if (numReadyBytes > 0) { int numReadyTxns = buf.countReadyTxns(); long firstTxToFlush = buf.getFirstReadyTxId(); assert numReadyTxns > 0; // Copy from our double-buffer into a new byte array. This is for // two reasons: // 1) The IPC code has no way of specifying to send only a slice of // a larger array. // 2) because the calls to the underlying nodes are asynchronous, we // need a defensive copy to avoid accidentally mutating the buffer // before it is sent. DataOutputBuffer bufToSend = new DataOutputBuffer(numReadyBytes); buf.flushTo(bufToSend); assert bufToSend.getLength() == numReadyBytes; byte[] data = bufToSend.getData(); assert data.length == bufToSend.getLength(); QuorumCall<AsyncLogger, Void> qcall = loggers.sendEdits( segmentTxId, firstTxToFlush, numReadyTxns, data); loggers.waitForWriteQuorum(qcall, writeTimeoutMs, "sendEdits"); // Since we successfully wrote this batch, let the loggers know. Any future // RPCs will thus let the loggers know of the most recent transaction, even // if a logger has fallen behind. 
loggers.setCommittedTxId(firstTxToFlush + numReadyTxns - 1); } } @Override public String generateReport() { StringBuilder sb = new StringBuilder(); sb.append("Writing segment beginning at txid " + segmentTxId + ". \n"); loggers.appendReport(sb); return sb.toString(); } @Override public String toString() { return "QuorumOutputStream starting at txid " + segmentTxId; } }
4,210
31.643411
81
java
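flushAndSync() above leans on two ideas: a double buffer, so the writer can keep appending edits while a swapped-out buffer is being flushed, and a defensive copy of the ready bytes, because the per-logger RPCs complete asynchronously and must not see the buffer mutate. A minimal sketch of both (hypothetical class; EditsDoubleBuffer is the real implementation):

import java.io.ByteArrayOutputStream;

public class DoubleBufferSketch {
  private ByteArrayOutputStream current = new ByteArrayOutputStream();
  private ByteArrayOutputStream ready = new ByteArrayOutputStream();

  // Writers append to 'current' only.
  public synchronized void write(byte[] op) {
    current.write(op, 0, op.length);
  }

  // Swap buffers: subsequent writes go to the old 'ready' buffer while the
  // newly swapped-out one is drained.
  public synchronized void setReadyToFlush() {
    ByteArrayOutputStream tmp = ready;
    ready = current;
    current = tmp;
  }

  // Return a defensive copy of the ready bytes (toByteArray() copies) and
  // clear the buffer for the next swap.
  public synchronized byte[] drainReady() {
    byte[] copy = ready.toByteArray();
    ready.reset();
    return copy;
  }
}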
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.client; import java.net.InetSocketAddress; import java.net.URL; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import com.google.common.util.concurrent.ListenableFuture; /** * Interface for a remote log which is only communicated with asynchronously. * This is essentially a wrapper around {@link QJournalProtocol} with the key * differences being: * * <ul> * <li>All methods return {@link ListenableFuture}s instead of synchronous * objects.</li> * <li>The {@link RequestInfo} objects are created by the underlying * implementation.</li> * </ul> */ interface AsyncLogger { interface Factory { AsyncLogger createLogger(Configuration conf, NamespaceInfo nsInfo, String journalId, InetSocketAddress addr); } /** * Send a batch of edits to the logger. * @param segmentTxId the first txid in the current segment * @param firstTxnId the first txid of the edits. * @param numTxns the number of transactions in the batch * @param data the actual data to be sent */ public ListenableFuture<Void> sendEdits( final long segmentTxId, final long firstTxnId, final int numTxns, final byte[] data); /** * Begin writing a new log segment. * * @param txid the first txid to be written to the new log * @param layoutVersion the LayoutVersion of the log */ public ListenableFuture<Void> startLogSegment(long txid, int layoutVersion); /** * Finalize a log segment. * * @param startTxId the first txid that was written to the segment * @param endTxId the last txid that was written to the segment */ public ListenableFuture<Void> finalizeLogSegment( long startTxId, long endTxId); /** * Allow the remote node to purge edit logs earlier than this. * @param minTxIdToKeep the min txid which must be retained */ public ListenableFuture<Void> purgeLogsOlderThan(long minTxIdToKeep); /** * Format the log directory. * @param nsInfo the namespace info to format with */ public ListenableFuture<Void> format(NamespaceInfo nsInfo); /** * @return whether or not the remote node has any valid data. 
*/ public ListenableFuture<Boolean> isFormatted(); /** * @return the state of the last epoch on the target node. */ public ListenableFuture<GetJournalStateResponseProto> getJournalState(); /** * Begin a new epoch on the target node. */ public ListenableFuture<NewEpochResponseProto> newEpoch(long epoch); /** * Fetch the list of edit logs available on the remote node. */ public ListenableFuture<RemoteEditLogManifest> getEditLogManifest( long fromTxnId, boolean inProgressOk); /** * Prepare recovery. See the HDFS-3077 design document for details. */ public ListenableFuture<PrepareRecoveryResponseProto> prepareRecovery( long segmentTxId); /** * Accept a recovery proposal. See the HDFS-3077 design document for details. */ public ListenableFuture<Void> acceptRecovery(SegmentStateProto log, URL fromUrl); /** * Set the epoch number used for all future calls. */ public void setEpoch(long e); /** * Let the logger know the highest committed txid across all loggers in the * set. This txid may be higher than the last committed txid for <em>this</em> * logger. See HDFS-3863 for details. */ public void setCommittedTxId(long txid); /** * Build an HTTP URL to fetch the log segment with the given startTxId. */ public URL buildURLToFetchLogs(long segmentTxId); /** * Tear down any resources, connections, etc. The proxy may not be used * after this point, and any in-flight RPCs may throw an exception. */ public void close(); /** * Append an HTML-formatted report for this logger's status to the provided * StringBuilder. This is displayed on the NN web UI. */ public void appendReport(StringBuilder sb); public ListenableFuture<Void> doPreUpgrade(); public ListenableFuture<Void> doUpgrade(StorageInfo sInfo); public ListenableFuture<Void> doFinalize(); public ListenableFuture<Boolean> canRollBack(StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion); public ListenableFuture<Void> doRollback(); public ListenableFuture<Long> getJournalCTime(); public ListenableFuture<Void> discardSegments(long startTxId); }
5,875
33.162791
100
java
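A note on using the interface above: every method is fire-and-forget from the caller's point of view, with the outcome delivered through the returned ListenableFuture. The following sketch is hypothetical (the helper class, method, and log strings are invented for illustration) and assumes the same two-argument Guava Futures.addCallback overload this codebase already uses.

package org.apache.hadoop.hdfs.qjournal.client;

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

// Hypothetical helper, not part of the Hadoop sources above.
class AsyncLoggerUsageSketch {

  /** Send one batch of edits to a single logger and report the outcome
   *  asynchronously instead of blocking on the returned future. */
  static void sendOneBatch(final AsyncLogger logger, final long segmentTxId,
      final long firstTxnId, final byte[] edits) {
    ListenableFuture<Void> f =
        logger.sendEdits(segmentTxId, firstTxnId, 1, edits);
    Futures.addCallback(f, new FutureCallback<Void>() {
      @Override
      public void onSuccess(Void result) {
        System.out.println("txn " + firstTxnId + " acked by " + logger);
      }
      @Override
      public void onFailure(Throwable t) {
        System.err.println("txn " + firstTxnId + " failed on " + logger
            + ": " + t);
      }
    });
  }
}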
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.client; import java.io.IOException; import java.util.Map; import org.apache.hadoop.util.StringUtils; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; /** * Exception thrown when too many exceptions occur while gathering * responses to a quorum call. */ class QuorumException extends IOException { /** * Create a QuorumException instance with a descriptive message detailing * the underlying exceptions, as well as any successful responses which * were returned. * @param <K> the keys for the quorum calls * @param <V> the success response type * @param successes any successful responses returned * @param exceptions the exceptions returned */ public static <K, V> QuorumException create( String simpleMsg, Map<K, V> successes, Map<K, Throwable> exceptions) { Preconditions.checkArgument(!exceptions.isEmpty(), "Must pass exceptions"); StringBuilder msg = new StringBuilder(); msg.append(simpleMsg).append(". "); if (!successes.isEmpty()) { msg.append(successes.size()).append(" successful responses:\n"); Joiner.on("\n") .useForNull("null [success]") .withKeyValueSeparator(": ") .appendTo(msg, successes); msg.append("\n"); } msg.append(exceptions.size() + " exceptions thrown:\n"); boolean isFirst = true; for (Map.Entry<K, Throwable> e : exceptions.entrySet()) { if (!isFirst) { msg.append("\n"); } isFirst = false; msg.append(e.getKey()).append(": "); if (e.getValue() instanceof RuntimeException) { msg.append(StringUtils.stringifyException(e.getValue())); } else if (e.getValue().getLocalizedMessage() != null) { msg.append(e.getValue().getLocalizedMessage()); } else { msg.append(StringUtils.stringifyException(e.getValue())); } } return new QuorumException(msg.toString()); } private QuorumException(String msg) { super(msg); } private static final long serialVersionUID = 1L; }
2,950
31.788889
75
java
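A hedged sketch of how QuorumException.create is typically driven: collect per-node successes and failures, then fold them into one exception. The node addresses, error strings, and helper class are invented; because QuorumException is package-private, the sketch assumes it sits in the same package.

package org.apache.hadoop.hdfs.qjournal.client;

import java.io.IOException;
import java.util.Map;

import com.google.common.collect.ImmutableMap;

// Hypothetical helper, not part of the Hadoop sources above.
class QuorumExceptionSketch {

  /** Build a QuorumException for a call that succeeded on one node and
   *  failed on two, then print the aggregated message. */
  static void demo() {
    Map<String, String> successes = ImmutableMap.of("jn1:8485", "ok");
    Map<String, Throwable> exceptions = ImmutableMap.<String, Throwable>of(
        "jn2:8485", new IOException("Connection refused"),
        "jn3:8485", new IOException("Call timed out"));
    QuorumException qe =
        QuorumException.create("sendEdits failed", successes, exceptions);
    // The message lists the single success plus both exceptions.
    System.out.println(qe.getMessage());
  }
}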
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.client; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.TimeoutException; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.Time; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.Maps; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.protobuf.Message; import com.google.protobuf.TextFormat; /** * Represents a set of calls for which a quorum of results is needed. * @param <KEY> a key used to identify each of the outgoing calls * @param <RESULT> the type of the call result */ class QuorumCall<KEY, RESULT> { private final Map<KEY, RESULT> successes = Maps.newHashMap(); private final Map<KEY, Throwable> exceptions = Maps.newHashMap(); /** * Interval, in milliseconds, at which a log message will be made * while waiting for a quorum call. */ private static final int WAIT_PROGRESS_INTERVAL_MILLIS = 1000; /** * Start logging messages at INFO level periodically after waiting for * this fraction of the configured timeout for any call. */ private static final float WAIT_PROGRESS_INFO_THRESHOLD = 0.3f; /** * Start logging messages at WARN level after waiting for this * fraction of the configured timeout for any call. */ private static final float WAIT_PROGRESS_WARN_THRESHOLD = 0.7f; static <KEY, RESULT> QuorumCall<KEY, RESULT> create( Map<KEY, ? extends ListenableFuture<RESULT>> calls) { final QuorumCall<KEY, RESULT> qr = new QuorumCall<KEY, RESULT>(); for (final Entry<KEY, ? extends ListenableFuture<RESULT>> e : calls.entrySet()) { Preconditions.checkArgument(e.getValue() != null, "null future for key: " + e.getKey()); Futures.addCallback(e.getValue(), new FutureCallback<RESULT>() { @Override public void onFailure(Throwable t) { qr.addException(e.getKey(), t); } @Override public void onSuccess(RESULT res) { qr.addResult(e.getKey(), res); } }); } return qr; } private QuorumCall() { // Only instantiated from factory method above } /** * Wait for the quorum to achieve a certain number of responses. * * Note that, even after this returns, more responses may arrive, * causing the return value of other methods in this class to change. * * @param minResponses return as soon as this many responses have been * received, regardless of whether they are successes or exceptions * @param minSuccesses return as soon as this many successful (non-exception) * responses have been received * @param maxExceptions return as soon as this many exception responses * have been received. Pass 0 to return immediately if any exception is * received. 
* @param millis the number of milliseconds to wait for * @throws InterruptedException if the thread is interrupted while waiting * @throws TimeoutException if the specified timeout elapses before * achieving the desired conditions */ public synchronized void waitFor( int minResponses, int minSuccesses, int maxExceptions, int millis, String operationName) throws InterruptedException, TimeoutException { long st = Time.monotonicNow(); long nextLogTime = st + (long)(millis * WAIT_PROGRESS_INFO_THRESHOLD); long et = st + millis; while (true) { checkAssertionErrors(); if (minResponses > 0 && countResponses() >= minResponses) return; if (minSuccesses > 0 && countSuccesses() >= minSuccesses) return; if (maxExceptions >= 0 && countExceptions() > maxExceptions) return; long now = Time.monotonicNow(); if (now > nextLogTime) { long waited = now - st; String msg = String.format( "Waited %s ms (timeout=%s ms) for a response for %s", waited, millis, operationName); if (!successes.isEmpty()) { msg += ". Succeeded so far: [" + Joiner.on(",").join(successes.keySet()) + "]"; } if (!exceptions.isEmpty()) { msg += ". Exceptions so far: [" + getExceptionMapString() + "]"; } if (successes.isEmpty() && exceptions.isEmpty()) { msg += ". No responses yet."; } if (waited > millis * WAIT_PROGRESS_WARN_THRESHOLD) { QuorumJournalManager.LOG.warn(msg); } else { QuorumJournalManager.LOG.info(msg); } nextLogTime = now + WAIT_PROGRESS_INTERVAL_MILLIS; } long rem = et - now; if (rem <= 0) { throw new TimeoutException(); } rem = Math.min(rem, nextLogTime - now); rem = Math.max(rem, 1); wait(rem); } } /** * Check if any of the responses came back with an AssertionError. * If so, it re-throws it, even if there was a quorum of responses. * This code only runs if assertions are enabled for this class, * otherwise it should JIT itself away. * * This is done since AssertionError indicates programmer confusion * rather than some kind of expected issue, and thus in the context * of test cases we'd like to actually fail the test case instead of * continuing through. */ private synchronized void checkAssertionErrors() { boolean assertsEnabled = false; assert assertsEnabled = true; // sets to true if enabled if (assertsEnabled) { for (Throwable t : exceptions.values()) { if (t instanceof AssertionError) { throw (AssertionError)t; } else if (t instanceof RemoteException && ((RemoteException)t).getClassName().equals( AssertionError.class.getName())) { throw new AssertionError(t); } } } } private synchronized void addResult(KEY k, RESULT res) { successes.put(k, res); notifyAll(); } private synchronized void addException(KEY k, Throwable t) { exceptions.put(k, t); notifyAll(); } /** * @return the total number of calls for which a response has been received, * regardless of whether it threw an exception or returned a successful * result. */ public synchronized int countResponses() { return successes.size() + exceptions.size(); } /** * @return the number of calls for which a non-exception response has been * received. */ public synchronized int countSuccesses() { return successes.size(); } /** * @return the number of calls for which an exception response has been * received. */ public synchronized int countExceptions() { return exceptions.size(); } /** * @return the map of successful responses. A copy is made such that this * map will not be further mutated, even if further results arrive for the * quorum. 
*/ public synchronized Map<KEY, RESULT> getResults() { return Maps.newHashMap(successes); } public synchronized void rethrowException(String msg) throws QuorumException { Preconditions.checkState(!exceptions.isEmpty()); throw QuorumException.create(msg, successes, exceptions); } public static <K> String mapToString( Map<K, ? extends Message> map) { StringBuilder sb = new StringBuilder(); boolean first = true; for (Map.Entry<K, ? extends Message> e : map.entrySet()) { if (!first) { sb.append("\n"); } first = false; sb.append(e.getKey()).append(": ") .append(TextFormat.shortDebugString(e.getValue())); } return sb.toString(); } /** * Return a string suitable for displaying to the user, containing * any exceptions that have been received so far. */ private String getExceptionMapString() { StringBuilder sb = new StringBuilder(); boolean first = true; for (Map.Entry<KEY, Throwable> e : exceptions.entrySet()) { if (!first) { sb.append(", "); } first = false; sb.append(e.getKey()).append(": ") .append(e.getValue().getLocalizedMessage()); } return sb.toString(); } }
9,022
33.837838
89
java
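The waitFor contract above is easiest to see with throwaway futures. This hypothetical sketch (same package, since QuorumCall is package-private; node names and the Guava SettableFuture plumbing are illustrative) passes the same (size, majority, majority) arguments that AsyncLoggerSet.waitForWriteQuorum uses.

package org.apache.hadoop.hdfs.qjournal.client;

import java.util.Map;
import java.util.concurrent.TimeoutException;

import com.google.common.collect.ImmutableMap;
import com.google.common.util.concurrent.SettableFuture;

// Hypothetical helper, not part of the Hadoop sources above.
class QuorumCallSketch {

  static void demo() throws InterruptedException, TimeoutException {
    // Three outstanding calls, keyed by journal-node name.
    SettableFuture<String> jn1 = SettableFuture.create();
    SettableFuture<String> jn2 = SettableFuture.create();
    SettableFuture<String> jn3 = SettableFuture.create();
    Map<String, SettableFuture<String>> calls =
        ImmutableMap.of("jn1", jn1, "jn2", jn2, "jn3", jn3);
    QuorumCall<String, String> q = QuorumCall.create(calls);

    // Two of the three nodes answer; the third stays silent.
    jn1.set("ack");
    jn2.set("ack");

    // Wait at most one second until all 3 respond, a majority (2) succeed,
    // or more than 2 exceptions arrive.
    q.waitFor(3, 2, 2, 1000, "demo");

    // Prints "2 successes, 0 exceptions".
    System.out.println(q.countSuccesses() + " successes, "
        + q.countExceptions() + " exceptions");
  }
}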
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.client; import java.io.IOException; import java.net.URL; import java.util.List; import java.util.Map; import java.util.concurrent.TimeoutException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.Maps; import com.google.common.util.concurrent.ListenableFuture; /** * Wrapper around a set of Loggers, taking care of fanning out * calls to the underlying loggers and constructing corresponding * {@link QuorumCall} instances. */ class AsyncLoggerSet { static final Log LOG = LogFactory.getLog(AsyncLoggerSet.class); private final List<AsyncLogger> loggers; private static final long INVALID_EPOCH = -1; private long myEpoch = INVALID_EPOCH; public AsyncLoggerSet(List<AsyncLogger> loggers) { this.loggers = ImmutableList.copyOf(loggers); } void setEpoch(long e) { Preconditions.checkState(!isEpochEstablished(), "Epoch already established: epoch=%s", myEpoch); myEpoch = e; for (AsyncLogger l : loggers) { l.setEpoch(e); } } /** * Set the highest successfully committed txid seen by the writer. * This should be called after a successful write to a quorum, and is used * for extra sanity checks against the protocol. See HDFS-3863. */ public void setCommittedTxId(long txid) { for (AsyncLogger logger : loggers) { logger.setCommittedTxId(txid); } } /** * @return true if an epoch has been established. */ boolean isEpochEstablished() { return myEpoch != INVALID_EPOCH; } /** * @return the epoch number for this writer. This may only be called after * a successful call to {@link #createNewUniqueEpoch(NamespaceInfo)}. */ long getEpoch() { Preconditions.checkState(myEpoch != INVALID_EPOCH, "No epoch created yet"); return myEpoch; } /** * Close all of the underlying loggers. 
*/ void close() { for (AsyncLogger logger : loggers) { logger.close(); } } void purgeLogsOlderThan(long minTxIdToKeep) { for (AsyncLogger logger : loggers) { logger.purgeLogsOlderThan(minTxIdToKeep); } } /** * Wait for a quorum of loggers to respond to the given call. If a quorum * can't be achieved, throws a QuorumException. * @param q the quorum call * @param timeoutMs the number of millis to wait * @param operationName textual description of the operation, for logging * @return a map of successful results * @throws QuorumException if a quorum doesn't respond with success * @throws IOException if the thread is interrupted or times out */ <V> Map<AsyncLogger, V> waitForWriteQuorum(QuorumCall<AsyncLogger, V> q, int timeoutMs, String operationName) throws IOException { int majority = getMajoritySize(); try { q.waitFor( loggers.size(), // either all respond majority, // or we get a majority successes majority, // or we get a majority failures, timeoutMs, operationName); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IOException("Interrupted waiting " + timeoutMs + "ms for a " + "quorum of nodes to respond."); } catch (TimeoutException e) { throw new IOException("Timed out waiting " + timeoutMs + "ms for a " + "quorum of nodes to respond."); } if (q.countSuccesses() < majority) { q.rethrowException("Got too many exceptions to achieve quorum size " + getMajorityString()); } return q.getResults(); } /** * @return the number of nodes which are required to obtain a quorum. */ int getMajoritySize() { return loggers.size() / 2 + 1; } /** * @return a textual description of the majority size (eg "2/3" or "3/5") */ String getMajorityString() { return getMajoritySize() + "/" + loggers.size(); } /** * @return the number of loggers behind this set */ int size() { return loggers.size(); } @Override public String toString() { return "[" + Joiner.on(", ").join(loggers) + "]"; } /** * Append an HTML-formatted status readout on the current * state of the underlying loggers. * @param sb the StringBuilder to append to */ void appendReport(StringBuilder sb) { for (int i = 0, len = loggers.size(); i < len; ++i) { AsyncLogger l = loggers.get(i); if (i != 0) { sb.append(", "); } sb.append(l).append(" ("); l.appendReport(sb); sb.append(")"); } } /** * @return the (mutable) list of loggers, for use in tests to * set up spies */ @VisibleForTesting List<AsyncLogger> getLoggersForTests() { return loggers; } /////////////////////////////////////////////////////////////////////////// // The rest of this file is simply boilerplate wrappers which fan-out the // various IPC calls to the underlying AsyncLoggers and wrap the result // in a QuorumCall. 
/////////////////////////////////////////////////////////////////////////// public QuorumCall<AsyncLogger, GetJournalStateResponseProto> getJournalState() { Map<AsyncLogger, ListenableFuture<GetJournalStateResponseProto>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { calls.put(logger, logger.getJournalState()); } return QuorumCall.create(calls); } public QuorumCall<AsyncLogger, Boolean> isFormatted() { Map<AsyncLogger, ListenableFuture<Boolean>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { calls.put(logger, logger.isFormatted()); } return QuorumCall.create(calls); } public QuorumCall<AsyncLogger,NewEpochResponseProto> newEpoch( NamespaceInfo nsInfo, long epoch) { Map<AsyncLogger, ListenableFuture<NewEpochResponseProto>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { calls.put(logger, logger.newEpoch(epoch)); } return QuorumCall.create(calls); } public QuorumCall<AsyncLogger, Void> startLogSegment( long txid, int layoutVersion) { Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { calls.put(logger, logger.startLogSegment(txid, layoutVersion)); } return QuorumCall.create(calls); } public QuorumCall<AsyncLogger, Void> finalizeLogSegment(long firstTxId, long lastTxId) { Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { calls.put(logger, logger.finalizeLogSegment(firstTxId, lastTxId)); } return QuorumCall.create(calls); } public QuorumCall<AsyncLogger, Void> sendEdits( long segmentTxId, long firstTxnId, int numTxns, byte[] data) { Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture<Void> future = logger.sendEdits(segmentTxId, firstTxnId, numTxns, data); calls.put(logger, future); } return QuorumCall.create(calls); } public QuorumCall<AsyncLogger, RemoteEditLogManifest> getEditLogManifest( long fromTxnId, boolean inProgressOk) { Map<AsyncLogger, ListenableFuture<RemoteEditLogManifest>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture<RemoteEditLogManifest> future = logger.getEditLogManifest(fromTxnId, inProgressOk); calls.put(logger, future); } return QuorumCall.create(calls); } QuorumCall<AsyncLogger, PrepareRecoveryResponseProto> prepareRecovery(long segmentTxId) { Map<AsyncLogger, ListenableFuture<PrepareRecoveryResponseProto>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture<PrepareRecoveryResponseProto> future = logger.prepareRecovery(segmentTxId); calls.put(logger, future); } return QuorumCall.create(calls); } QuorumCall<AsyncLogger,Void> acceptRecovery(SegmentStateProto log, URL fromURL) { Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture<Void> future = logger.acceptRecovery(log, fromURL); calls.put(logger, future); } return QuorumCall.create(calls); } QuorumCall<AsyncLogger,Void> format(NamespaceInfo nsInfo) { Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture<Void> future = logger.format(nsInfo); calls.put(logger, future); } return QuorumCall.create(calls); } public QuorumCall<AsyncLogger, Void> discardSegments(long startTxId) { Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture<Void> future = logger.discardSegments(startTxId); calls.put(logger, future); } return 
QuorumCall.create(calls); } QuorumCall<AsyncLogger, Void> doPreUpgrade() { Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture<Void> future = logger.doPreUpgrade(); calls.put(logger, future); } return QuorumCall.create(calls); } public QuorumCall<AsyncLogger, Void> doUpgrade(StorageInfo sInfo) { Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture<Void> future = logger.doUpgrade(sInfo); calls.put(logger, future); } return QuorumCall.create(calls); } public QuorumCall<AsyncLogger, Void> doFinalize() { Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture<Void> future = logger.doFinalize(); calls.put(logger, future); } return QuorumCall.create(calls); } public QuorumCall<AsyncLogger, Boolean> canRollBack(StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion) { Map<AsyncLogger, ListenableFuture<Boolean>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture<Boolean> future = logger.canRollBack(storage, prevStorage, targetLayoutVersion); calls.put(logger, future); } return QuorumCall.create(calls); } public QuorumCall<AsyncLogger, Void> doRollback() { Map<AsyncLogger, ListenableFuture<Void>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture<Void> future = logger.doRollback(); calls.put(logger, future); } return QuorumCall.create(calls); } public QuorumCall<AsyncLogger, Long> getJournalCTime() { Map<AsyncLogger, ListenableFuture<Long>> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture<Long> future = logger.getJournalCTime(); calls.put(logger, future); } return QuorumCall.create(calls); } }
12,767
31.822622
100
java
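A short hypothetical sketch of driving one fan-out wrapper end to end: build the set, fire isFormatted() at every logger, then wait for a write quorum of answers. The helper name and the "no responder said false" policy are illustrative only; the real client applies its own policy, and the sketch assumes the same package because AsyncLoggerSet is package-private.

package org.apache.hadoop.hdfs.qjournal.client;

import java.io.IOException;
import java.util.List;
import java.util.Map;

// Hypothetical helper, not part of the Hadoop sources above.
class AsyncLoggerSetSketch {

  /** Ask every JournalNode whether it is formatted and wait for a quorum
   *  of responses within the given timeout. */
  static boolean quorumIsFormatted(List<AsyncLogger> loggers, int timeoutMs)
      throws IOException {
    AsyncLoggerSet loggerSet = new AsyncLoggerSet(loggers);
    QuorumCall<AsyncLogger, Boolean> q = loggerSet.isFormatted();
    Map<AsyncLogger, Boolean> responses =
        loggerSet.waitForWriteQuorum(q, timeoutMs, "isFormatted");
    // Illustrative policy: treat the journal as formatted only if no
    // responder reported otherwise.
    return !responses.containsValue(Boolean.FALSE);
  }
}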
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/SegmentRecoveryComparator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.client; import java.util.Comparator; import java.util.Map.Entry; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; import com.google.common.base.Preconditions; import com.google.common.collect.ComparisonChain; import com.google.common.primitives.Booleans; /** * Compares responses to the prepareRecovery RPC. This is responsible for * determining the correct length to recover. */ class SegmentRecoveryComparator implements Comparator<Entry<AsyncLogger, PrepareRecoveryResponseProto>> { static final SegmentRecoveryComparator INSTANCE = new SegmentRecoveryComparator(); @Override public int compare( Entry<AsyncLogger, PrepareRecoveryResponseProto> a, Entry<AsyncLogger, PrepareRecoveryResponseProto> b) { PrepareRecoveryResponseProto r1 = a.getValue(); PrepareRecoveryResponseProto r2 = b.getValue(); // A response that has data for a segment is always better than one // that doesn't. if (r1.hasSegmentState() != r2.hasSegmentState()) { return Booleans.compare(r1.hasSegmentState(), r2.hasSegmentState()); } if (!r1.hasSegmentState()) { // Neither has a segment, so neither can be used for recover. // Call them equal. return 0; } // They both have a segment. SegmentStateProto r1Seg = r1.getSegmentState(); SegmentStateProto r2Seg = r2.getSegmentState(); Preconditions.checkArgument(r1Seg.getStartTxId() == r2Seg.getStartTxId(), "Should only be called with responses for corresponding segments: " + "%s and %s do not have the same start txid.", r1, r2); // If one is in-progress but the other is finalized, // the finalized one is greater. if (r1Seg.getIsInProgress() != r2Seg.getIsInProgress()) { return Booleans.compare(!r1Seg.getIsInProgress(), !r2Seg.getIsInProgress()); } if (!r1Seg.getIsInProgress()) { // If both are finalized, they should match lengths if (r1Seg.getEndTxId() != r2Seg.getEndTxId()) { throw new AssertionError("finalized segs with different lengths: " + r1 + ", " + r2); } return 0; } // Both are in-progress. long r1SeenEpoch = Math.max(r1.getAcceptedInEpoch(), r1.getLastWriterEpoch()); long r2SeenEpoch = Math.max(r2.getAcceptedInEpoch(), r2.getLastWriterEpoch()); return ComparisonChain.start() .compare(r1SeenEpoch, r2SeenEpoch) .compare(r1.getSegmentState().getEndTxId(), r2.getSegmentState().getEndTxId()) .result(); } }
3,522
37.293478
100
java
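Since the comparator above orders prepareRecovery responses from worst to best candidate, choosing the response to recover from reduces to taking the maximum entry. A hypothetical sketch, assuming the caller has already gathered a map of responses from a quorum of loggers:

package org.apache.hadoop.hdfs.qjournal.client;

import java.util.Collections;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;

// Hypothetical helper, not part of the Hadoop sources above.
class SegmentRecoverySketch {

  /** Pick the prepareRecovery response that should drive recovery:
   *  the greatest entry under SegmentRecoveryComparator. */
  static Entry<AsyncLogger, PrepareRecoveryResponseProto> pickBest(
      Map<AsyncLogger, PrepareRecoveryResponseProto> prepareResponses) {
    return Collections.max(prepareResponses.entrySet(),
        SegmentRecoveryComparator.INSTANCE);
  }
}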
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/JournalOutOfSyncException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.protocol; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; @InterfaceAudience.Private public class JournalOutOfSyncException extends IOException { private static final long serialVersionUID = 1L; public JournalOutOfSyncException(String msg) { super(msg); } }
1,161
34.212121
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/JournalNotFormattedException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.protocol; import org.apache.hadoop.classification.InterfaceAudience; import java.io.IOException; /** * Exception indicating that a call has been made to a JournalNode * which is not yet formatted. */ @InterfaceAudience.Private public class JournalNotFormattedException extends IOException { private static final long serialVersionUID = 1L; public JournalNotFormattedException(String msg) { super(msg); } }
1,272
34.361111
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/RequestInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; @InterfaceAudience.Private public class RequestInfo { private final String jid; private long epoch; private long ipcSerialNumber; private final long committedTxId; public RequestInfo(String jid, long epoch, long ipcSerialNumber, long committedTxId) { this.jid = jid; this.epoch = epoch; this.ipcSerialNumber = ipcSerialNumber; this.committedTxId = committedTxId; } public long getEpoch() { return epoch; } public void setEpoch(long epoch) { this.epoch = epoch; } public String getJournalId() { return jid; } public long getIpcSerialNumber() { return ipcSerialNumber; } public void setIpcSerialNumber(long ipcSerialNumber) { this.ipcSerialNumber = ipcSerialNumber; } public long getCommittedTxId() { return committedTxId; } public boolean hasCommittedTxId() { return (committedTxId != HdfsServerConstants.INVALID_TXID); } }
1,908
27.924242
75
java
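RequestInfo is the per-call envelope: journal id, writer epoch, an IPC serial number, and an optional committed txid with HdfsServerConstants.INVALID_TXID as the "not known yet" sentinel. A tiny hypothetical sketch; the helper name and the starting serial number are illustrative.

import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

// Hypothetical helper, not part of the Hadoop sources above.
class RequestInfoSketch {

  /** Build the request metadata for the first IPC of a freshly won epoch,
   *  before any txid has been committed. */
  static RequestInfo firstCallOfEpoch(String journalId, long epoch) {
    RequestInfo req = new RequestInfo(journalId, epoch,
        1 /* illustrative first IPC serial number */,
        HdfsServerConstants.INVALID_TXID);
    assert !req.hasCommittedTxId();   // INVALID_TXID means "none yet"
    return req;
  }
}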
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.protocol; import java.io.IOException; import java.net.URL; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; import org.apache.hadoop.hdfs.qjournal.server.JournalNode; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.JournalManager; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.io.retry.Idempotent; import org.apache.hadoop.security.KerberosInfo; /** * Protocol used to communicate between {@link QuorumJournalManager} * and each {@link JournalNode}. * * This is responsible for sending edits as well as coordinating * recovery of the nodes. */ @KerberosInfo( serverPrincipal = DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, clientPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY) @InterfaceAudience.Private public interface QJournalProtocol { public static final long versionID = 1L; /** * @return true if the given journal has been formatted and * contains valid data. */ public boolean isFormatted(String journalId) throws IOException; /** * Get the current state of the journal, including the most recent * epoch number and the HTTP port. */ public GetJournalStateResponseProto getJournalState(String journalId) throws IOException; /** * Format the underlying storage for the given namespace. */ public void format(String journalId, NamespaceInfo nsInfo) throws IOException; /** * Begin a new epoch. See the HDFS-3077 design doc for details. */ public NewEpochResponseProto newEpoch(String journalId, NamespaceInfo nsInfo, long epoch) throws IOException; /** * Journal edit records. * This message is sent by the active name-node to the JournalNodes * to write edits to their local logs. */ public void journal(RequestInfo reqInfo, long segmentTxId, long firstTxnId, int numTxns, byte[] records) throws IOException; /** * Heartbeat. * This is a no-op on the server, except that it verifies that the * caller is in fact still the active writer, and provides up-to-date * information on the most recently committed txid. 
*/ public void heartbeat(RequestInfo reqInfo) throws IOException; /** * Start writing to a new log segment on the JournalNode. * Before calling this, one should finalize the previous segment * using {@link #finalizeLogSegment(RequestInfo, long, long)}. * * @param txid the first txid in the new log * @param layoutVersion the LayoutVersion of the new log */ public void startLogSegment(RequestInfo reqInfo, long txid, int layoutVersion) throws IOException; /** * Finalize the given log segment on the JournalNode. The segment * is expected to be in-progress and starting at the given startTxId. * * @param startTxId the starting transaction ID of the log * @param endTxId the expected last transaction in the given log * @throws IOException if no such segment exists */ public void finalizeLogSegment(RequestInfo reqInfo, long startTxId, long endTxId) throws IOException; /** * @throws IOException * @see JournalManager#purgeLogsOlderThan(long) */ public void purgeLogsOlderThan(RequestInfo requestInfo, long minTxIdToKeep) throws IOException; /** * @param jid the journal from which to enumerate edits * @param sinceTxId the first transaction which the client cares about * @param inProgressOk whether or not to check the in-progress edit log * segment * @return a list of edit log segments since the given transaction ID. */ public GetEditLogManifestResponseProto getEditLogManifest(String jid, long sinceTxId, boolean inProgressOk) throws IOException; /** * Begin the recovery process for a given segment. See the HDFS-3077 * design document for details. */ public PrepareRecoveryResponseProto prepareRecovery(RequestInfo reqInfo, long segmentTxId) throws IOException; /** * Accept a proposed recovery for the given transaction ID. */ public void acceptRecovery(RequestInfo reqInfo, SegmentStateProto stateToAccept, URL fromUrl) throws IOException; public void doPreUpgrade(String journalId) throws IOException; public void doUpgrade(String journalId, StorageInfo sInfo) throws IOException; public void doFinalize(String journalId) throws IOException; public Boolean canRollBack(String journalId, StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion) throws IOException; public void doRollback(String journalId) throws IOException; public Long getJournalCTime(String journalId) throws IOException; /** * Discard journal segments whose first TxId is greater than or equal to the * given txid. */ @Idempotent public void discardSegments(String journalId, long startTxId) throws IOException; }
6,453
36.74269
103
java
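The write path implied by the protocol above is: open a segment, stream journal() batches into it, then finalize it. A hedged happy-path sketch against a single node follows; obtaining the proxy, epoch, and layout version is out of scope, and the serial-number handling is simplified relative to the real client.

import java.io.IOException;

import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

// Hypothetical helper, not part of the Hadoop sources above.
class QJournalProtocolSketch {

  /** Write a single-transaction segment [txid, txid] to one JournalNode. */
  static void writeOneSegment(QJournalProtocol proxy, String journalId,
      long epoch, long txid, int layoutVersion, byte[] oneSerializedEdit)
      throws IOException {
    RequestInfo req = new RequestInfo(journalId, epoch,
        1 /* illustrative IPC serial number */,
        HdfsServerConstants.INVALID_TXID);

    proxy.startLogSegment(req, txid, layoutVersion);

    req.setIpcSerialNumber(2);   // each call is expected to advance the serial
    proxy.journal(req, txid, txid, 1, oneSerializedEdit);

    req.setIpcSerialNumber(3);
    proxy.finalizeLogSegment(req, txid, txid);
  }
}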
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.server; import java.io.File; import java.io.IOException; import java.util.List; import java.util.Properties; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageErrorReporter; import org.apache.hadoop.hdfs.server.namenode.FileJournalManager; import org.apache.hadoop.hdfs.server.namenode.NNStorage; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import com.google.common.collect.ImmutableList; /** * A {@link Storage} implementation for the {@link JournalNode}. * * The JN has a storage directory for each namespace for which it stores * metadata. There is only a single directory per JN in the current design. */ class JNStorage extends Storage { private final FileJournalManager fjm; private final StorageDirectory sd; private StorageState state; private static final List<Pattern> CURRENT_DIR_PURGE_REGEXES = ImmutableList.of( Pattern.compile("edits_\\d+-(\\d+)"), Pattern.compile("edits_inprogress_(\\d+)(?:\\..*)?")); private static final List<Pattern> PAXOS_DIR_PURGE_REGEXES = ImmutableList.of(Pattern.compile("(\\d+)")); /** * @param conf Configuration object * @param logDir the path to the directory in which data will be stored * @param errorReporter a callback to report errors * @throws IOException */ protected JNStorage(Configuration conf, File logDir, StartupOption startOpt, StorageErrorReporter errorReporter) throws IOException { super(NodeType.JOURNAL_NODE); sd = new StorageDirectory(logDir); this.addStorageDir(sd); this.fjm = new FileJournalManager(conf, sd, errorReporter); analyzeAndRecoverStorage(startOpt); } FileJournalManager getJournalManager() { return fjm; } @Override public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException { return false; } /** * Find an edits file spanning the given transaction ID range. * If no such file exists, an exception is thrown. */ File findFinalizedEditsFile(long startTxId, long endTxId) throws IOException { File ret = new File(sd.getCurrentDir(), NNStorage.getFinalizedEditsFileName(startTxId, endTxId)); if (!ret.exists()) { throw new IOException( "No edits file for range " + startTxId + "-" + endTxId); } return ret; } /** * @return the path for an in-progress edits file starting at the given * transaction ID. 
This does not verify existence of the file.
   */
  File getInProgressEditLog(long startTxId) {
    return new File(sd.getCurrentDir(),
        NNStorage.getInProgressEditsFileName(startTxId));
  }

  /**
   * @param segmentTxId the first txid of the segment
   * @param epoch the epoch number of the writer which is coordinating
   * recovery
   * @return the temporary path in which an edits log should be stored
   * while it is being downloaded from a remote JournalNode
   */
  File getSyncLogTemporaryFile(long segmentTxId, long epoch) {
    String name = NNStorage.getInProgressEditsFileName(segmentTxId) +
        ".epoch=" + epoch;
    return new File(sd.getCurrentDir(), name);
  }

  /**
   * @return the path for the file which contains persisted data for the
   * paxos-like recovery process for the given log segment.
   */
  File getPaxosFile(long segmentTxId) {
    return new File(getPaxosDir(), String.valueOf(segmentTxId));
  }

  File getPaxosDir() {
    return new File(sd.getCurrentDir(), "paxos");
  }

  File getRoot() {
    return sd.getRoot();
  }

  /**
   * Remove any log files and associated paxos files which are older than
   * the given txid.
   */
  void purgeDataOlderThan(long minTxIdToKeep) throws IOException {
    purgeMatching(sd.getCurrentDir(),
        CURRENT_DIR_PURGE_REGEXES, minTxIdToKeep);
    purgeMatching(getPaxosDir(), PAXOS_DIR_PURGE_REGEXES, minTxIdToKeep);
  }

  /**
   * Purge files in the given directory which match any of the set of patterns.
   * The patterns must have a single numeric capture group which determines
   * the associated transaction ID of the file. Only those files for which
   * the transaction ID is less than the <code>minTxIdToKeep</code> parameter
   * are removed.
   */
  private static void purgeMatching(File dir, List<Pattern> patterns,
      long minTxIdToKeep) throws IOException {

    for (File f : FileUtil.listFiles(dir)) {
      if (!f.isFile()) continue;

      for (Pattern p : patterns) {
        Matcher matcher = p.matcher(f.getName());
        if (matcher.matches()) {
          // This parsing will always succeed since the group(1) is
          // /\d+/ in the regex itself.
          long txid = Long.parseLong(matcher.group(1));
          if (txid < minTxIdToKeep) {
            LOG.info("Purging no-longer needed file " + f);
            if (!f.delete()) {
              LOG.warn("Unable to delete no-longer-needed data " + f);
            }
            break;
          }
        }
      }
    }
  }

  void format(NamespaceInfo nsInfo) throws IOException {
    setStorageInfo(nsInfo);
    LOG.info("Formatting journal " + sd + " with nsid: " + getNamespaceID());
    // Unlock the directory before formatting, because we will
    // re-analyze it after format(). The analyzeStorage() call
    // below is responsible for re-locking it. This is a no-op
    // if the storage is not currently locked.
    unlockAll();
    sd.clearDirectory();
    writeProperties(sd);
    createPaxosDir();
    analyzeStorage();
  }

  void createPaxosDir() throws IOException {
    if (!getPaxosDir().mkdirs()) {
      throw new IOException("Could not create paxos dir: " + getPaxosDir());
    }
  }

  void analyzeStorage() throws IOException {
    this.state = sd.analyzeStorage(StartupOption.REGULAR, this);
    if (state == StorageState.NORMAL) {
      readProperties(sd);
    }
  }

  @Override
  protected void setLayoutVersion(Properties props, StorageDirectory sd)
      throws IncorrectVersionException, InconsistentFSStateException {
    int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
    // For journal node, since it now does not decode but just scan through the
    // edits, it can handle edits with future version in most of the cases.
    // Thus currently we may skip the layoutVersion check here.
layoutVersion = lv; } void analyzeAndRecoverStorage(StartupOption startOpt) throws IOException { this.state = sd.analyzeStorage(startOpt, this); final boolean needRecover = state != StorageState.NORMAL && state != StorageState.NON_EXISTENT && state != StorageState.NOT_FORMATTED; if (state == StorageState.NORMAL && startOpt != StartupOption.ROLLBACK) { readProperties(sd); } else if (needRecover) { sd.doRecover(state); } } void checkConsistentNamespace(NamespaceInfo nsInfo) throws IOException { if (nsInfo.getNamespaceID() != getNamespaceID()) { throw new IOException("Incompatible namespaceID for journal " + this.sd + ": NameNode has nsId " + nsInfo.getNamespaceID() + " but storage has nsId " + getNamespaceID()); } if (!nsInfo.getClusterID().equals(getClusterID())) { throw new IOException("Incompatible clusterID for journal " + this.sd + ": NameNode has clusterId '" + nsInfo.getClusterID() + "' but storage has clusterId '" + getClusterID() + "'"); } } public void close() throws IOException { LOG.info("Closing journal storage for " + sd); unlockAll(); } public boolean isFormatted() { return state == StorageState.NORMAL; } }
8,938
33.917969
80
java
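The purge logic above relies on each filename pattern exposing exactly one numeric capture group, which is parsed as the txid compared against minTxIdToKeep. A standalone hypothetical sketch of that convention; the patterns are copied from JNStorage and the zero-padded filenames are invented examples.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical helper, not part of the Hadoop sources above.
class PurgePatternSketch {

  static void demo() {
    Pattern finalized = Pattern.compile("edits_\\d+-(\\d+)");
    Pattern inProgress = Pattern.compile("edits_inprogress_(\\d+)(?:\\..*)?");

    // A finalized segment spanning txids 1-100: the captured group is the
    // end txid, so the file survives until minTxIdToKeep exceeds 100.
    Matcher m1 = finalized.matcher(
        "edits_0000000000000000001-0000000000000000100");
    System.out.println(m1.matches() + " txid=" + Long.parseLong(m1.group(1)));

    // A temporary file created while syncing an in-progress segment from
    // another JN still matches via the optional ".epoch=N" suffix.
    Matcher m2 = inProgress.matcher(
        "edits_inprogress_0000000000000000101.epoch=7");
    System.out.println(m2.matches() + " txid=" + Long.parseLong(m2.group(1)));
  }
}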
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.server; import java.io.File; import java.io.FileFilter; import java.io.IOException; import java.net.InetSocketAddress; import java.util.HashMap; import java.util.Map; import javax.management.ObjectName; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.StorageErrorReporter; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.mortbay.util.ajax.JSON; import com.google.common.base.Preconditions; import com.google.common.collect.Maps; /** * The JournalNode is a daemon which allows namenodes using * the QuorumJournalManager to log and retrieve edits stored * remotely. It is a thin wrapper around a local edit log * directory with the addition of facilities to participate * in the quorum protocol. */ @InterfaceAudience.Private public class JournalNode implements Tool, Configurable, JournalNodeMXBean { public static final Log LOG = LogFactory.getLog(JournalNode.class); private Configuration conf; private JournalNodeRpcServer rpcServer; private JournalNodeHttpServer httpServer; private final Map<String, Journal> journalsById = Maps.newHashMap(); private ObjectName journalNodeInfoBeanName; private String httpServerURI; private File localDir; static { HdfsConfiguration.init(); } /** * When stopped, the daemon will exit with this code. 
*/ private int resultCode = 0; synchronized Journal getOrCreateJournal(String jid, StartupOption startOpt) throws IOException { QuorumJournalManager.checkJournalId(jid); Journal journal = journalsById.get(jid); if (journal == null) { File logDir = getLogDir(jid); LOG.info("Initializing journal in directory " + logDir); journal = new Journal(conf, logDir, jid, startOpt, new ErrorReporter()); journalsById.put(jid, journal); } return journal; } @VisibleForTesting public Journal getOrCreateJournal(String jid) throws IOException { return getOrCreateJournal(jid, StartupOption.REGULAR); } @Override public void setConf(Configuration conf) { this.conf = conf; this.localDir = new File( conf.get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_DEFAULT).trim()); } private static void validateAndCreateJournalDir(File dir) throws IOException { if (!dir.isAbsolute()) { throw new IllegalArgumentException( "Journal dir '" + dir + "' should be an absolute path"); } DiskChecker.checkDir(dir); } @Override public Configuration getConf() { return conf; } @Override public int run(String[] args) throws Exception { start(); return join(); } /** * Start listening for edits via RPC. */ public void start() throws IOException { Preconditions.checkState(!isStarted(), "JN already running"); validateAndCreateJournalDir(localDir); DefaultMetricsSystem.initialize("JournalNode"); JvmMetrics.create("JournalNode", conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY), DefaultMetricsSystem.instance()); InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf); SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY, DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName()); registerJNMXBean(); httpServer = new JournalNodeHttpServer(conf, this); httpServer.start(); httpServerURI = httpServer.getServerURI().toString(); rpcServer = new JournalNodeRpcServer(conf, this); rpcServer.start(); } public boolean isStarted() { return rpcServer != null; } /** * @return the address the IPC server is bound to */ public InetSocketAddress getBoundIpcAddress() { return rpcServer.getAddress(); } @Deprecated public InetSocketAddress getBoundHttpAddress() { return httpServer.getAddress(); } public String getHttpServerURI() { return httpServerURI; } /** * Stop the daemon with the given status code * @param rc the status code with which to exit (non-zero * should indicate an error) */ public void stop(int rc) { this.resultCode = rc; if (rpcServer != null) { rpcServer.stop(); } if (httpServer != null) { try { httpServer.stop(); } catch (IOException ioe) { LOG.warn("Unable to stop HTTP server for " + this, ioe); } } for (Journal j : journalsById.values()) { IOUtils.cleanup(LOG, j); } if (journalNodeInfoBeanName != null) { MBeans.unregister(journalNodeInfoBeanName); journalNodeInfoBeanName = null; } } /** * Wait for the daemon to exit. * @return the result code (non-zero if error) */ int join() throws InterruptedException { if (rpcServer != null) { rpcServer.join(); } return resultCode; } public void stopAndJoin(int rc) throws InterruptedException { stop(rc); join(); } /** * Return the directory inside our configured storage * dir which corresponds to a given journal. 
* @param jid the journal identifier * @return the file, which may or may not exist yet */ private File getLogDir(String jid) { String dir = conf.get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_DEFAULT); Preconditions.checkArgument(jid != null && !jid.isEmpty(), "bad journal identifier: %s", jid); assert jid != null; return new File(new File(dir), jid); } @Override // JournalNodeMXBean public String getJournalsStatus() { // jid:{Formatted:True/False} Map<String, Map<String, String>> status = new HashMap<String, Map<String, String>>(); synchronized (this) { for (Map.Entry<String, Journal> entry : journalsById.entrySet()) { Map<String, String> jMap = new HashMap<String, String>(); jMap.put("Formatted", Boolean.toString(entry.getValue().isFormatted())); status.put(entry.getKey(), jMap); } } // It is possible that some journals have been formatted before, while the // corresponding journals are not in journalsById yet (because of restarting // JN, e.g.). For simplicity, let's just assume a journal is formatted if // there is a directory for it. We can also call analyzeStorage method for // these directories if necessary. // Also note that we do not need to check localDir here since // validateAndCreateJournalDir has been called before we register the // MXBean. File[] journalDirs = localDir.listFiles(new FileFilter() { @Override public boolean accept(File file) { return file.isDirectory(); } }); for (File journalDir : journalDirs) { String jid = journalDir.getName(); if (!status.containsKey(jid)) { Map<String, String> jMap = new HashMap<String, String>(); jMap.put("Formatted", "true"); status.put(jid, jMap); } } return JSON.toString(status); } /** * Register JournalNodeMXBean */ private void registerJNMXBean() { journalNodeInfoBeanName = MBeans.register("JournalNode", "JournalNodeInfo", this); } private class ErrorReporter implements StorageErrorReporter { @Override public void reportErrorOnFile(File f) { LOG.fatal("Error reported on file " + f + "... exiting", new Exception()); stop(1); } } public static void main(String[] args) throws Exception { StringUtils.startupShutdownMessage(JournalNode.class, args, LOG); System.exit(ToolRunner.run(new JournalNode(), args)); } public void discardSegments(String journalId, long startTxId) throws IOException { getOrCreateJournal(journalId).discardSegments(startTxId); } public void doPreUpgrade(String journalId) throws IOException { getOrCreateJournal(journalId).doPreUpgrade(); } public void doUpgrade(String journalId, StorageInfo sInfo) throws IOException { getOrCreateJournal(journalId).doUpgrade(sInfo); } public void doFinalize(String journalId) throws IOException { getOrCreateJournal(journalId).doFinalize(); } public Boolean canRollBack(String journalId, StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion) throws IOException { return getOrCreateJournal(journalId, StartupOption.ROLLBACK).canRollBack( storage, prevStorage, targetLayoutVersion); } public void doRollback(String journalId) throws IOException { getOrCreateJournal(journalId, StartupOption.ROLLBACK).doRollback(); } public Long getJournalCTime(String journalId) throws IOException { return getOrCreateJournal(journalId).getJournalCTime(); } }
10,498
30.719033
86
java
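A hypothetical embedding sketch (for example, in a test harness): point the daemon at an edits directory, start it, and shut it down. Only methods shown above are used; the directory path and helper name are made up, and the path must be absolute or start() will reject it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.qjournal.server.JournalNode;

// Hypothetical helper, not part of the Hadoop sources above.
class JournalNodeSketch {

  static void runBriefly() throws Exception {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
        "/tmp/jn-sketch/edits");   // illustrative; must be absolute
    JournalNode jn = new JournalNode();
    jn.setConf(conf);
    jn.start();                                    // RPC and HTTP come up
    System.out.println("IPC bound to " + jn.getBoundIpcAddress());
    jn.stopAndJoin(0);                             // clean shutdown
  }
}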
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/GetJournalEditServlet.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.server; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; import java.util.HashSet; import java.util.Set; import javax.servlet.ServletContext; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.FileJournalManager; import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.namenode.ImageServlet; import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode; import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ServletUtil; import org.apache.hadoop.util.StringUtils; /** * This servlet is used in two cases: * <ul> * <li>The QuorumJournalManager, when reading edits, fetches the edit streams * from the journal nodes.</li> * <li>During edits synchronization, one journal node will fetch edits from * another journal node.</li> * </ul> */ @InterfaceAudience.Private public class GetJournalEditServlet extends HttpServlet { private static final long serialVersionUID = -4635891628211723009L; private static final Log LOG = LogFactory.getLog(GetJournalEditServlet.class); static final String STORAGEINFO_PARAM = "storageInfo"; static final String JOURNAL_ID_PARAM = "jid"; static final String SEGMENT_TXID_PARAM = "segmentTxId"; protected boolean isValidRequestor(HttpServletRequest request, Configuration conf) throws IOException { String remotePrincipal = request.getUserPrincipal().getName(); String remoteShortName = request.getRemoteUser(); if (remotePrincipal == null) { // This really shouldn't happen... 
LOG.warn("Received null remoteUser while authorizing access to " + "GetJournalEditServlet"); return false; } if (LOG.isDebugEnabled()) { LOG.debug("Validating request made by " + remotePrincipal + " / " + remoteShortName + ". This user is: " + UserGroupInformation.getLoginUser()); } Set<String> validRequestors = new HashSet<String>(); validRequestors.addAll(DFSUtil.getAllNnPrincipals(conf)); try { validRequestors.add( SecurityUtil.getServerPrincipal(conf .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY), SecondaryNameNode.getHttpAddress(conf).getHostName())); } catch (Exception e) { // Don't halt if SecondaryNameNode principal could not be added. LOG.debug("SecondaryNameNode principal could not be added", e); String msg = String.format( "SecondaryNameNode principal not considered, %s = %s, %s = %s", DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY, conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY), DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT)); LOG.warn(msg); } // Check the full principal name of all the configured valid requestors. for (String v : validRequestors) { if (LOG.isDebugEnabled()) LOG.debug("isValidRequestor is comparing to valid requestor: " + v); if (v != null && v.equals(remotePrincipal)) { if (LOG.isDebugEnabled()) LOG.debug("isValidRequestor is allowing: " + remotePrincipal); return true; } } // Additionally, we compare the short name of the requestor to this JN's // username, because we want to allow requests from other JNs during // recovery, but we can't enumerate the full list of JNs. if (remoteShortName.equals( UserGroupInformation.getLoginUser().getShortUserName())) { if (LOG.isDebugEnabled()) LOG.debug("isValidRequestor is allowing other JN principal: " + remotePrincipal); return true; } if (LOG.isDebugEnabled()) LOG.debug("isValidRequestor is rejecting: " + remotePrincipal); return false; } private boolean checkRequestorOrSendError(Configuration conf, HttpServletRequest request, HttpServletResponse response) throws IOException { if (UserGroupInformation.isSecurityEnabled() && !isValidRequestor(request, conf)) { response.sendError(HttpServletResponse.SC_FORBIDDEN, "Only Namenode and another JournalNode may access this servlet"); LOG.warn("Received non-NN/JN request for edits from " + request.getRemoteHost()); return false; } return true; } private boolean checkStorageInfoOrSendError(JNStorage storage, HttpServletRequest request, HttpServletResponse response) throws IOException { int myNsId = storage.getNamespaceID(); String myClusterId = storage.getClusterID(); String theirStorageInfoString = StringEscapeUtils.escapeHtml( request.getParameter(STORAGEINFO_PARAM)); if (theirStorageInfoString != null) { int theirNsId = StorageInfo.getNsIdFromColonSeparatedString( theirStorageInfoString); String theirClusterId = StorageInfo.getClusterIdFromColonSeparatedString( theirStorageInfoString); if (myNsId != theirNsId || !myClusterId.equals(theirClusterId)) { String msg = "This node has namespaceId '" + myNsId + " and clusterId '" + myClusterId + "' but the requesting node expected '" + theirNsId + "' and '" + theirClusterId + "'"; response.sendError(HttpServletResponse.SC_FORBIDDEN, msg); LOG.warn("Received an invalid request file transfer request from " + request.getRemoteAddr() + ": " + msg); return false; } } return true; } @Override public void doGet(final HttpServletRequest request, final 
HttpServletResponse response) throws ServletException, IOException { FileInputStream editFileIn = null; try { final ServletContext context = getServletContext(); final Configuration conf = (Configuration) getServletContext() .getAttribute(JspHelper.CURRENT_CONF); final String journalId = request.getParameter(JOURNAL_ID_PARAM); QuorumJournalManager.checkJournalId(journalId); final JNStorage storage = JournalNodeHttpServer .getJournalFromContext(context, journalId).getStorage(); // Check security if (!checkRequestorOrSendError(conf, request, response)) { return; } // Check that the namespace info is correct if (!checkStorageInfoOrSendError(storage, request, response)) { return; } long segmentTxId = ServletUtil.parseLongParam(request, SEGMENT_TXID_PARAM); FileJournalManager fjm = storage.getJournalManager(); File editFile; synchronized (fjm) { // Synchronize on the FJM so that the file doesn't get finalized // out from underneath us while we're in the process of opening // it up. EditLogFile elf = fjm.getLogFile( segmentTxId); if (elf == null) { response.sendError(HttpServletResponse.SC_NOT_FOUND, "No edit log found starting at txid " + segmentTxId); return; } editFile = elf.getFile(); ImageServlet.setVerificationHeadersForGet(response, editFile); ImageServlet.setFileNameHeaders(response, editFile); editFileIn = new FileInputStream(editFile); } DataTransferThrottler throttler = ImageServlet.getThrottler(conf); // send edits TransferFsImage.copyFileToStream(response.getOutputStream(), editFile, editFileIn, throttler); } catch (Throwable t) { String errMsg = "getedit failed. " + StringUtils.stringifyException(t); response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, errMsg); throw new IOException(errMsg); } finally { IOUtils.closeStream(editFileIn); } } public static String buildPath(String journalId, long segmentTxId, NamespaceInfo nsInfo) { StringBuilder path = new StringBuilder("/getJournal?"); try { path.append(JOURNAL_ID_PARAM).append("=") .append(URLEncoder.encode(journalId, "UTF-8")); path.append("&" + SEGMENT_TXID_PARAM).append("=") .append(segmentTxId); path.append("&" + STORAGEINFO_PARAM).append("=") .append(URLEncoder.encode(nsInfo.toColonSeparatedString(), "UTF-8")); } catch (UnsupportedEncodingException e) { // Never get here -- everyone supports UTF-8 throw new RuntimeException(e); } return path.toString(); } }
10,414
39.368217
84
java
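The buildPath() helper above assembles the /getJournal URL that QuorumJournalManager and peer JournalNodes use to fetch an edit segment. The sketch below reproduces that URL shape with hypothetical values (journal id "ns1", segment txid 12345, and an assumed colon-separated storage-info string); it is an illustration, not the servlet's own code.

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

public class GetJournalUrlExample {
  public static void main(String[] args) throws UnsupportedEncodingException {
    String journalId = "ns1";          // hypothetical journal id
    long segmentTxId = 12345L;         // hypothetical first txid of the segment
    // Hypothetical storage-info string; the real value comes from
    // NamespaceInfo#toColonSeparatedString().
    String storageInfo = "-60:1234567890:0:CID-example";
    String path = "/getJournal?jid=" + URLEncoder.encode(journalId, "UTF-8")
        + "&segmentTxId=" + segmentTxId
        + "&storageInfo=" + URLEncoder.encode(storageInfo, "UTF-8");
    System.out.println(path);
    // /getJournal?jid=ns1&segmentTxId=12345&storageInfo=-60%3A1234567890%3A0%3ACID-example
  }
}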
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.server; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * This is the JMX management interface for JournalNode information */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface JournalNodeMXBean { /** * Get status information (e.g., whether formatted) of JournalNode's journals. * * @return A string presenting status for each journal */ public String getJournalsStatus(); }
1,342
35.297297
80
java
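Because JournalNodeMXBean is registered as a JMX bean, its JournalsStatus attribute can be read remotely with the standard javax.management client API. The sketch below is a hypothetical client: the host name, JMX port, and the ObjectName "Hadoop:service=JournalNode,name=JournalNodeInfo" are assumptions (the name follows the usual Hadoop MBeans.register naming pattern), not values taken from this source.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class JournalNodeJmxClientSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint: assumes the JournalNode JVM exposes remote JMX on port 8999.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://jn-host.example.com:8999/jmxrmi");
    JMXConnector connector = JMXConnectorFactory.connect(url);
    try {
      MBeanServerConnection mbsc = connector.getMBeanServerConnection();
      // Assumed ObjectName, following the usual "Hadoop:service=<service>,name=<name>"
      // pattern used by MBeans.register("JournalNode", "JournalNodeInfo", ...).
      ObjectName name = new ObjectName("Hadoop:service=JournalNode,name=JournalNodeInfo");
      String journalsStatus = (String) mbsc.getAttribute(name, "JournalsStatus");
      System.out.println(journalsStatus); // e.g. {"ns1":{"Formatted":"true"}}
    } finally {
      connector.close();
    }
  }
}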
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.server; import java.io.Closeable; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStreamWriter; import java.net.URL; import java.security.PrivilegedExceptionAction; import java.util.Iterator; import java.util.List; import java.util.concurrent.TimeUnit; import org.apache.commons.lang.math.LongRange; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.qjournal.protocol.JournalNotFormattedException; import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.StorageErrorReporter; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream; import org.apache.hadoop.hdfs.server.namenode.FileJournalManager; import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.namenode.JournalManager; import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.hdfs.util.AtomicFileOutputStream; import org.apache.hadoop.hdfs.util.BestEffortLongFile; import org.apache.hadoop.hdfs.util.PersistentLongFile; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StopWatch; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Charsets; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.protobuf.TextFormat; /** * A JournalNode can manage 
journals for several clusters at once. * Each such journal is entirely independent despite being hosted by * the same JVM. */ public class Journal implements Closeable { static final Log LOG = LogFactory.getLog(Journal.class); // Current writing state private EditLogOutputStream curSegment; private long curSegmentTxId = HdfsServerConstants.INVALID_TXID; private long nextTxId = HdfsServerConstants.INVALID_TXID; private long highestWrittenTxId = 0; private final String journalId; private final JNStorage storage; /** * When a new writer comes along, it asks each node to promise * to ignore requests from any previous writer, as identified * by epoch number. In order to make such a promise, the epoch * number of that writer is stored persistently on disk. */ private PersistentLongFile lastPromisedEpoch; /** * Each IPC that comes from a given client contains a serial number * which only increases from the client's perspective. Whenever * we switch epochs, we reset this back to -1. Whenever an IPC * comes from a client, we ensure that it is strictly higher * than any previous IPC. This guards against any bugs in the IPC * layer that would re-order IPCs or cause a stale retry from an old * request to resurface and confuse things. */ private long currentEpochIpcSerial = -1; /** * The epoch number of the last writer to actually write a transaction. * This is used to differentiate log segments after a crash at the very * beginning of a segment. See the the 'testNewerVersionOfSegmentWins' * test case. */ private PersistentLongFile lastWriterEpoch; /** * Lower-bound on the last committed transaction ID. This is not * depended upon for correctness, but acts as a sanity check * during the recovery procedures, and as a visibility mark * for clients reading in-progress logs. */ private BestEffortLongFile committedTxnId; public static final String LAST_PROMISED_FILENAME = "last-promised-epoch"; public static final String LAST_WRITER_EPOCH = "last-writer-epoch"; private static final String COMMITTED_TXID_FILENAME = "committed-txid"; private final FileJournalManager fjm; private final JournalMetrics metrics; /** * Time threshold for sync calls, beyond which a warning should be logged to the console. */ private static final int WARN_SYNC_MILLIS_THRESHOLD = 1000; Journal(Configuration conf, File logDir, String journalId, StartupOption startOpt, StorageErrorReporter errorReporter) throws IOException { storage = new JNStorage(conf, logDir, startOpt, errorReporter); this.journalId = journalId; refreshCachedData(); this.fjm = storage.getJournalManager(); this.metrics = JournalMetrics.create(this); EditLogFile latest = scanStorageForLatestEdits(); if (latest != null) { highestWrittenTxId = latest.getLastTxId(); } } /** * Reload any data that may have been cached. This is necessary * when we first load the Journal, but also after any formatting * operation, since the cached data is no longer relevant. */ private synchronized void refreshCachedData() { IOUtils.closeStream(committedTxnId); File currentDir = storage.getSingularStorageDir().getCurrentDir(); this.lastPromisedEpoch = new PersistentLongFile( new File(currentDir, LAST_PROMISED_FILENAME), 0); this.lastWriterEpoch = new PersistentLongFile( new File(currentDir, LAST_WRITER_EPOCH), 0); this.committedTxnId = new BestEffortLongFile( new File(currentDir, COMMITTED_TXID_FILENAME), HdfsServerConstants.INVALID_TXID); } /** * Scan the local storage directory, and return the segment containing * the highest transaction. 
* @return the EditLogFile with the highest transactions, or null * if no files exist. */ private synchronized EditLogFile scanStorageForLatestEdits() throws IOException { if (!fjm.getStorageDirectory().getCurrentDir().exists()) { return null; } LOG.info("Scanning storage " + fjm); List<EditLogFile> files = fjm.getLogFiles(0); while (!files.isEmpty()) { EditLogFile latestLog = files.remove(files.size() - 1); latestLog.scanLog(); LOG.info("Latest log is " + latestLog); if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) { // the log contains no transactions LOG.warn("Latest log " + latestLog + " has no transactions. " + "moving it aside and looking for previous log"); latestLog.moveAsideEmptyFile(); } else { return latestLog; } } LOG.info("No files in " + fjm); return null; } /** * Format the local storage with the given namespace. */ void format(NamespaceInfo nsInfo) throws IOException { Preconditions.checkState(nsInfo.getNamespaceID() != 0, "can't format with uninitialized namespace info: %s", nsInfo); LOG.info("Formatting " + this + " with namespace info: " + nsInfo); storage.format(nsInfo); refreshCachedData(); } /** * Unlock and release resources. */ @Override // Closeable public void close() throws IOException { storage.close(); IOUtils.closeStream(committedTxnId); IOUtils.closeStream(curSegment); } JNStorage getStorage() { return storage; } String getJournalId() { return journalId; } /** * @return the last epoch which this node has promised not to accept * any lower epoch, or 0 if no promises have been made. */ synchronized long getLastPromisedEpoch() throws IOException { checkFormatted(); return lastPromisedEpoch.get(); } synchronized public long getLastWriterEpoch() throws IOException { checkFormatted(); return lastWriterEpoch.get(); } synchronized long getCommittedTxnIdForTests() throws IOException { return committedTxnId.get(); } synchronized long getCurrentLagTxns() throws IOException { long committed = committedTxnId.get(); if (committed == 0) { return 0; } return Math.max(committed - highestWrittenTxId, 0L); } synchronized long getHighestWrittenTxId() { return highestWrittenTxId; } @VisibleForTesting JournalMetrics getMetricsForTests() { return metrics; } /** * Try to create a new epoch for this journal. * @param nsInfo the namespace, which is verified for consistency or used to * format, if the Journal has not yet been written to. * @param epoch the epoch to start * @return the status information necessary to begin recovery * @throws IOException if the node has already made a promise to another * writer with a higher epoch number, if the namespace is inconsistent, * or if a disk error occurs. */ synchronized NewEpochResponseProto newEpoch( NamespaceInfo nsInfo, long epoch) throws IOException { checkFormatted(); storage.checkConsistentNamespace(nsInfo); // Check that the new epoch being proposed is in fact newer than // any other that we've promised. 
if (epoch <= getLastPromisedEpoch()) { throw new IOException("Proposed epoch " + epoch + " <= last promise " + getLastPromisedEpoch()); } updateLastPromisedEpoch(epoch); abortCurSegment(); NewEpochResponseProto.Builder builder = NewEpochResponseProto.newBuilder(); EditLogFile latestFile = scanStorageForLatestEdits(); if (latestFile != null) { builder.setLastSegmentTxId(latestFile.getFirstTxId()); } return builder.build(); } private void updateLastPromisedEpoch(long newEpoch) throws IOException { LOG.info("Updating lastPromisedEpoch from " + lastPromisedEpoch.get() + " to " + newEpoch + " for client " + Server.getRemoteIp()); lastPromisedEpoch.set(newEpoch); // Since we have a new writer, reset the IPC serial - it will start // counting again from 0 for this writer. currentEpochIpcSerial = -1; } private void abortCurSegment() throws IOException { if (curSegment == null) { return; } curSegment.abort(); curSegment = null; curSegmentTxId = HdfsServerConstants.INVALID_TXID; } /** * Write a batch of edits to the journal. * {@see QJournalProtocol#journal(RequestInfo, long, long, int, byte[])} */ synchronized void journal(RequestInfo reqInfo, long segmentTxId, long firstTxnId, int numTxns, byte[] records) throws IOException { checkFormatted(); checkWriteRequest(reqInfo); checkSync(curSegment != null, "Can't write, no segment open"); if (curSegmentTxId != segmentTxId) { // Sanity check: it is possible that the writer will fail IPCs // on both the finalize() and then the start() of the next segment. // This could cause us to continue writing to an old segment // instead of rolling to a new one, which breaks one of the // invariants in the design. If it happens, abort the segment // and throw an exception. JournalOutOfSyncException e = new JournalOutOfSyncException( "Writer out of sync: it thinks it is writing segment " + segmentTxId + " but current segment is " + curSegmentTxId); abortCurSegment(); throw e; } checkSync(nextTxId == firstTxnId, "Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId); long lastTxnId = firstTxnId + numTxns - 1; if (LOG.isTraceEnabled()) { LOG.trace("Writing txid " + firstTxnId + "-" + lastTxnId); } // If the edit has already been marked as committed, we know // it has been fsynced on a quorum of other nodes, and we are // "catching up" with the rest. Hence we do not need to fsync. boolean isLagging = lastTxnId <= committedTxnId.get(); boolean shouldFsync = !isLagging; curSegment.writeRaw(records, 0, records.length); curSegment.setReadyToFlush(); StopWatch sw = new StopWatch(); sw.start(); curSegment.flush(shouldFsync); sw.stop(); long nanoSeconds = sw.now(); metrics.addSync( TimeUnit.MICROSECONDS.convert(nanoSeconds, TimeUnit.NANOSECONDS)); long milliSeconds = TimeUnit.MILLISECONDS.convert( nanoSeconds, TimeUnit.NANOSECONDS); if (milliSeconds > WARN_SYNC_MILLIS_THRESHOLD) { LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId + " took " + milliSeconds + "ms"); } if (isLagging) { // This batch of edits has already been committed on a quorum of other // nodes. So, we are in "catch up" mode. This gets its own metric. metrics.batchesWrittenWhileLagging.incr(1); } metrics.batchesWritten.incr(1); metrics.bytesWritten.incr(records.length); metrics.txnsWritten.incr(numTxns); highestWrittenTxId = lastTxnId; nextTxId = lastTxnId + 1; } public void heartbeat(RequestInfo reqInfo) throws IOException { checkRequest(reqInfo); } /** * Ensure that the given request is coming from the correct writer and in-order. 
* @param reqInfo the request info * @throws IOException if the request is invalid. */ private synchronized void checkRequest(RequestInfo reqInfo) throws IOException { // Invariant 25 from ZAB paper if (reqInfo.getEpoch() < lastPromisedEpoch.get()) { throw new IOException("IPC's epoch " + reqInfo.getEpoch() + " is less than the last promised epoch " + lastPromisedEpoch.get()); } else if (reqInfo.getEpoch() > lastPromisedEpoch.get()) { // A newer client has arrived. Fence any previous writers by updating // the promise. updateLastPromisedEpoch(reqInfo.getEpoch()); } // Ensure that the IPCs are arriving in-order as expected. checkSync(reqInfo.getIpcSerialNumber() > currentEpochIpcSerial, "IPC serial %s from client %s was not higher than prior highest " + "IPC serial %s", reqInfo.getIpcSerialNumber(), Server.getRemoteIp(), currentEpochIpcSerial); currentEpochIpcSerial = reqInfo.getIpcSerialNumber(); if (reqInfo.hasCommittedTxId()) { Preconditions.checkArgument( reqInfo.getCommittedTxId() >= committedTxnId.get(), "Client trying to move committed txid backward from " + committedTxnId.get() + " to " + reqInfo.getCommittedTxId()); committedTxnId.set(reqInfo.getCommittedTxId()); } } private synchronized void checkWriteRequest(RequestInfo reqInfo) throws IOException { checkRequest(reqInfo); if (reqInfo.getEpoch() != lastWriterEpoch.get()) { throw new IOException("IPC's epoch " + reqInfo.getEpoch() + " is not the current writer epoch " + lastWriterEpoch.get()); } } public synchronized boolean isFormatted() { return storage.isFormatted(); } private void checkFormatted() throws JournalNotFormattedException { if (!isFormatted()) { throw new JournalNotFormattedException("Journal " + storage.getSingularStorageDir() + " not formatted"); } } /** * @throws JournalOutOfSyncException if the given expression is not true. * The message of the exception is formatted using the 'msg' and * 'formatArgs' parameters. */ private void checkSync(boolean expression, String msg, Object... formatArgs) throws JournalOutOfSyncException { if (!expression) { throw new JournalOutOfSyncException(String.format(msg, formatArgs)); } } /** * @throws AssertionError if the given expression is not true. * The message of the exception is formatted using the 'msg' and * 'formatArgs' parameters. * * This should be used in preference to Java's built-in assert in * non-performance-critical paths, where a failure of this invariant * might cause the protocol to lose data. */ private void alwaysAssert(boolean expression, String msg, Object... formatArgs) { if (!expression) { throw new AssertionError(String.format(msg, formatArgs)); } } /** * Start a new segment at the given txid. The previous segment * must have already been finalized. */ public synchronized void startLogSegment(RequestInfo reqInfo, long txid, int layoutVersion) throws IOException { assert fjm != null; checkFormatted(); checkRequest(reqInfo); if (curSegment != null) { LOG.warn("Client is requesting a new log segment " + txid + " though we are already writing " + curSegment + ". " + "Aborting the current segment in order to begin the new one."); // The writer may have lost a connection to us and is now // re-connecting after the connection came back. // We should abort our own old segment. abortCurSegment(); } // Paranoid sanity check: we should never overwrite a finalized log file. // Additionally, if it's in-progress, it should have at most 1 transaction. // This can happen if the writer crashes exactly at the start of a segment. 
EditLogFile existing = fjm.getLogFile(txid); if (existing != null) { if (!existing.isInProgress()) { throw new IllegalStateException("Already have a finalized segment " + existing + " beginning at " + txid); } // If it's in-progress, it should only contain one transaction, // because the "startLogSegment" transaction is written alone at the // start of each segment. existing.scanLog(); if (existing.getLastTxId() != existing.getFirstTxId()) { throw new IllegalStateException("The log file " + existing + " seems to contain valid transactions"); } } long curLastWriterEpoch = lastWriterEpoch.get(); if (curLastWriterEpoch != reqInfo.getEpoch()) { LOG.info("Updating lastWriterEpoch from " + curLastWriterEpoch + " to " + reqInfo.getEpoch() + " for client " + Server.getRemoteIp()); lastWriterEpoch.set(reqInfo.getEpoch()); } // The fact that we are starting a segment at this txid indicates // that any previous recovery for this same segment was aborted. // Otherwise, no writer would have started writing. So, we can // remove the record of the older segment here. purgePaxosDecision(txid); curSegment = fjm.startLogSegment(txid, layoutVersion); curSegmentTxId = txid; nextTxId = txid; } /** * Finalize the log segment at the given transaction ID. */ public synchronized void finalizeLogSegment(RequestInfo reqInfo, long startTxId, long endTxId) throws IOException { checkFormatted(); checkRequest(reqInfo); boolean needsValidation = true; // Finalizing the log that the writer was just writing. if (startTxId == curSegmentTxId) { if (curSegment != null) { curSegment.close(); curSegment = null; curSegmentTxId = HdfsServerConstants.INVALID_TXID; } checkSync(nextTxId == endTxId + 1, "Trying to finalize in-progress log segment %s to end at " + "txid %s but only written up to txid %s", startTxId, endTxId, nextTxId - 1); // No need to validate the edit log if the client is finalizing // the log segment that it was just writing to. needsValidation = false; } FileJournalManager.EditLogFile elf = fjm.getLogFile(startTxId); if (elf == null) { throw new JournalOutOfSyncException("No log file to finalize at " + "transaction ID " + startTxId); } if (elf.isInProgress()) { if (needsValidation) { LOG.info("Validating log segment " + elf.getFile() + " about to be " + "finalized"); elf.scanLog(); checkSync(elf.getLastTxId() == endTxId, "Trying to finalize in-progress log segment %s to end at " + "txid %s but log %s on disk only contains up to txid %s", startTxId, endTxId, elf.getFile(), elf.getLastTxId()); } fjm.finalizeLogSegment(startTxId, endTxId); } else { Preconditions.checkArgument(endTxId == elf.getLastTxId(), "Trying to re-finalize already finalized log " + elf + " with different endTxId " + endTxId); } // Once logs are finalized, a different length will never be decided. // During recovery, we treat a finalized segment the same as an accepted // recovery. Thus, we no longer need to keep track of the previously- // accepted decision. The existence of the finalized log segment is enough. purgePaxosDecision(elf.getFirstTxId()); } /** * @see JournalManager#purgeLogsOlderThan(long) */ public synchronized void purgeLogsOlderThan(RequestInfo reqInfo, long minTxIdToKeep) throws IOException { checkFormatted(); checkRequest(reqInfo); storage.purgeDataOlderThan(minTxIdToKeep); } /** * Remove the previously-recorded 'accepted recovery' information * for a given log segment, once it is no longer necessary. 
* @param segmentTxId the transaction ID to purge * @throws IOException if the file could not be deleted */ private void purgePaxosDecision(long segmentTxId) throws IOException { File paxosFile = storage.getPaxosFile(segmentTxId); if (paxosFile.exists()) { if (!paxosFile.delete()) { throw new IOException("Unable to delete paxos file " + paxosFile); } } } /** * @see QJournalProtocol#getEditLogManifest(String, long, boolean) */ public RemoteEditLogManifest getEditLogManifest(long sinceTxId, boolean inProgressOk) throws IOException { // No need to checkRequest() here - anyone may ask for the list // of segments. checkFormatted(); List<RemoteEditLog> logs = fjm.getRemoteEditLogs(sinceTxId, inProgressOk); if (inProgressOk) { RemoteEditLog log = null; for (Iterator<RemoteEditLog> iter = logs.iterator(); iter.hasNext();) { log = iter.next(); if (log.isInProgress()) { iter.remove(); break; } } if (log != null && log.isInProgress()) { logs.add(new RemoteEditLog(log.getStartTxId(), getHighestWrittenTxId(), true)); } } return new RemoteEditLogManifest(logs); } /** * @return the current state of the given segment, or null if the * segment does not exist. */ @VisibleForTesting SegmentStateProto getSegmentInfo(long segmentTxId) throws IOException { EditLogFile elf = fjm.getLogFile(segmentTxId); if (elf == null) { return null; } if (elf.isInProgress()) { elf.scanLog(); } if (elf.getLastTxId() == HdfsServerConstants.INVALID_TXID) { LOG.info("Edit log file " + elf + " appears to be empty. " + "Moving it aside..."); elf.moveAsideEmptyFile(); return null; } SegmentStateProto ret = SegmentStateProto.newBuilder() .setStartTxId(segmentTxId) .setEndTxId(elf.getLastTxId()) .setIsInProgress(elf.isInProgress()) .build(); LOG.info("getSegmentInfo(" + segmentTxId + "): " + elf + " -> " + TextFormat.shortDebugString(ret)); return ret; } /** * @see QJournalProtocol#prepareRecovery(RequestInfo, long) */ public synchronized PrepareRecoveryResponseProto prepareRecovery( RequestInfo reqInfo, long segmentTxId) throws IOException { checkFormatted(); checkRequest(reqInfo); abortCurSegment(); PrepareRecoveryResponseProto.Builder builder = PrepareRecoveryResponseProto.newBuilder(); PersistedRecoveryPaxosData previouslyAccepted = getPersistedPaxosData(segmentTxId); completeHalfDoneAcceptRecovery(previouslyAccepted); SegmentStateProto segInfo = getSegmentInfo(segmentTxId); boolean hasFinalizedSegment = segInfo != null && !segInfo.getIsInProgress(); if (previouslyAccepted != null && !hasFinalizedSegment) { SegmentStateProto acceptedState = previouslyAccepted.getSegmentState(); assert acceptedState.getEndTxId() == segInfo.getEndTxId() : "prev accepted: " + TextFormat.shortDebugString(previouslyAccepted)+ "\n" + "on disk: " + TextFormat.shortDebugString(segInfo); builder.setAcceptedInEpoch(previouslyAccepted.getAcceptedInEpoch()) .setSegmentState(previouslyAccepted.getSegmentState()); } else { if (segInfo != null) { builder.setSegmentState(segInfo); } } builder.setLastWriterEpoch(lastWriterEpoch.get()); if (committedTxnId.get() != HdfsServerConstants.INVALID_TXID) { builder.setLastCommittedTxId(committedTxnId.get()); } PrepareRecoveryResponseProto resp = builder.build(); LOG.info("Prepared recovery for segment " + segmentTxId + ": " + TextFormat.shortDebugString(resp)); return resp; } /** * @see QJournalProtocol#acceptRecovery(RequestInfo, QJournalProtocolProtos.SegmentStateProto, URL) */ public synchronized void acceptRecovery(RequestInfo reqInfo, SegmentStateProto segment, URL fromUrl) throws IOException { checkFormatted(); 
checkRequest(reqInfo); abortCurSegment(); long segmentTxId = segment.getStartTxId(); // Basic sanity checks that the segment is well-formed and contains // at least one transaction. Preconditions.checkArgument(segment.getEndTxId() > 0 && segment.getEndTxId() >= segmentTxId, "bad recovery state for segment %s: %s", segmentTxId, TextFormat.shortDebugString(segment)); PersistedRecoveryPaxosData oldData = getPersistedPaxosData(segmentTxId); PersistedRecoveryPaxosData newData = PersistedRecoveryPaxosData.newBuilder() .setAcceptedInEpoch(reqInfo.getEpoch()) .setSegmentState(segment) .build(); // If we previously acted on acceptRecovery() from a higher-numbered writer, // this call is out of sync. We should never actually trigger this, since the // checkRequest() call above should filter non-increasing epoch numbers. if (oldData != null) { alwaysAssert(oldData.getAcceptedInEpoch() <= reqInfo.getEpoch(), "Bad paxos transition, out-of-order epochs.\nOld: %s\nNew: %s\n", oldData, newData); } File syncedFile = null; SegmentStateProto currentSegment = getSegmentInfo(segmentTxId); if (currentSegment == null || currentSegment.getEndTxId() != segment.getEndTxId()) { if (currentSegment == null) { LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) + ": no current segment in place"); // Update the highest txid for lag metrics highestWrittenTxId = Math.max(segment.getEndTxId(), highestWrittenTxId); } else { LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) + ": old segment " + TextFormat.shortDebugString(currentSegment) + " is not the right length"); // Paranoid sanity check: if the new log is shorter than the log we // currently have, we should not end up discarding any transactions // which are already Committed. if (txnRange(currentSegment).containsLong(committedTxnId.get()) && !txnRange(segment).containsLong(committedTxnId.get())) { throw new AssertionError( "Cannot replace segment " + TextFormat.shortDebugString(currentSegment) + " with new segment " + TextFormat.shortDebugString(segment) + ": would discard already-committed txn " + committedTxnId.get()); } // Another paranoid check: we should not be asked to synchronize a log // on top of a finalized segment. alwaysAssert(currentSegment.getIsInProgress(), "Should never be asked to synchronize a different log on top of an " + "already-finalized segment"); // If we're shortening the log, update our highest txid // used for lag metrics. if (txnRange(currentSegment).containsLong(highestWrittenTxId)) { highestWrittenTxId = segment.getEndTxId(); } } syncedFile = syncLog(reqInfo, segment, fromUrl); } else { LOG.info("Skipping download of log " + TextFormat.shortDebugString(segment) + ": already have up-to-date logs"); } // This is one of the few places in the protocol where we have a single // RPC that results in two distinct actions: // // - 1) Downloads the new log segment data (above) // - 2) Records the new Paxos data about the synchronized segment (below) // // These need to be treated as a transaction from the perspective // of any external process. We do this by treating the persistPaxosData() // success as the "commit" of an atomic transaction. If we fail before // this point, the downloaded edit log will only exist at a temporary // path, and thus not change any externally visible state. If we fail // after this point, then any future prepareRecovery() call will see // the Paxos data, and by calling completeHalfDoneAcceptRecovery() will // roll forward the rename of the referenced log file. 
// // See also: HDFS-3955 // // The fault points here are exercised by the randomized fault injection // test case to ensure that this atomic "transaction" operates correctly. JournalFaultInjector.get().beforePersistPaxosData(); persistPaxosData(segmentTxId, newData); JournalFaultInjector.get().afterPersistPaxosData(); if (syncedFile != null) { FileUtil.replaceFile(syncedFile, storage.getInProgressEditLog(segmentTxId)); } LOG.info("Accepted recovery for segment " + segmentTxId + ": " + TextFormat.shortDebugString(newData)); } private LongRange txnRange(SegmentStateProto seg) { Preconditions.checkArgument(seg.hasEndTxId(), "invalid segment: %s", seg); return new LongRange(seg.getStartTxId(), seg.getEndTxId()); } /** * Synchronize a log segment from another JournalNode. The log is * downloaded from the provided URL into a temporary location on disk, * which is named based on the current request's epoch. * * @return the temporary location of the downloaded file */ private File syncLog(RequestInfo reqInfo, final SegmentStateProto segment, final URL url) throws IOException { final File tmpFile = storage.getSyncLogTemporaryFile( segment.getStartTxId(), reqInfo.getEpoch()); final List<File> localPaths = ImmutableList.of(tmpFile); LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) + " from " + url); SecurityUtil.doAsLoginUser( new PrivilegedExceptionAction<Void>() { @Override public Void run() throws IOException { // We may have lost our ticket since last checkpoint, log in again, just in case if (UserGroupInformation.isSecurityEnabled()) { UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab(); } boolean success = false; try { TransferFsImage.doGetUrl(url, localPaths, storage, true); assert tmpFile.exists(); success = true; } finally { if (!success) { if (!tmpFile.delete()) { LOG.warn("Failed to delete temporary file " + tmpFile); } } } return null; } }); return tmpFile; } /** * In the case the node crashes in between downloading a log segment * and persisting the associated paxos recovery data, the log segment * will be left in its temporary location on disk. Given the paxos data, * we can check if this was indeed the case, and &quot;roll forward&quot; * the atomic operation. * * See the inline comments in * {@link #acceptRecovery(RequestInfo, SegmentStateProto, URL)} for more * details. * * @throws IOException if the temporary file is unable to be renamed into * place */ private void completeHalfDoneAcceptRecovery( PersistedRecoveryPaxosData paxosData) throws IOException { if (paxosData == null) { return; } long segmentId = paxosData.getSegmentState().getStartTxId(); long epoch = paxosData.getAcceptedInEpoch(); File tmp = storage.getSyncLogTemporaryFile(segmentId, epoch); if (tmp.exists()) { File dst = storage.getInProgressEditLog(segmentId); LOG.info("Rolling forward previously half-completed synchronization: " + tmp + " -> " + dst); FileUtil.replaceFile(tmp, dst); } } /** * Retrieve the persisted data for recovering the given segment from disk. 
*/ private PersistedRecoveryPaxosData getPersistedPaxosData(long segmentTxId) throws IOException { File f = storage.getPaxosFile(segmentTxId); if (!f.exists()) { // Default instance has no fields filled in (they're optional) return null; } InputStream in = new FileInputStream(f); try { PersistedRecoveryPaxosData ret = PersistedRecoveryPaxosData.parseDelimitedFrom(in); Preconditions.checkState(ret != null && ret.getSegmentState().getStartTxId() == segmentTxId, "Bad persisted data for segment %s: %s", segmentTxId, ret); return ret; } finally { IOUtils.closeStream(in); } } /** * Persist data for recovering the given segment from disk. */ private void persistPaxosData(long segmentTxId, PersistedRecoveryPaxosData newData) throws IOException { File f = storage.getPaxosFile(segmentTxId); boolean success = false; AtomicFileOutputStream fos = new AtomicFileOutputStream(f); try { newData.writeDelimitedTo(fos); fos.write('\n'); // Write human-readable data after the protobuf. This is only // to assist in debugging -- it's not parsed at all. OutputStreamWriter writer = new OutputStreamWriter(fos, Charsets.UTF_8); writer.write(String.valueOf(newData)); writer.write('\n'); writer.flush(); fos.flush(); success = true; } finally { if (success) { IOUtils.closeStream(fos); } else { fos.abort(); } } } synchronized void discardSegments(long startTxId) throws IOException { storage.getJournalManager().discardSegments(startTxId); // we delete all the segments after the startTxId. let's reset committedTxnId committedTxnId.set(startTxId - 1); } public synchronized void doPreUpgrade() throws IOException { // Do not hold file lock on committedTxnId, because the containing // directory will be renamed. It will be reopened lazily on next access. IOUtils.cleanup(LOG, committedTxnId); storage.getJournalManager().doPreUpgrade(); } public synchronized void doUpgrade(StorageInfo sInfo) throws IOException { long oldCTime = storage.getCTime(); storage.cTime = sInfo.cTime; int oldLV = storage.getLayoutVersion(); storage.layoutVersion = sInfo.layoutVersion; LOG.info("Starting upgrade of edits directory: " + ".\n old LV = " + oldLV + "; old CTime = " + oldCTime + ".\n new LV = " + storage.getLayoutVersion() + "; new CTime = " + storage.getCTime()); storage.getJournalManager().doUpgrade(storage); storage.createPaxosDir(); // Copy over the contents of the epoch data files to the new dir. File currentDir = storage.getSingularStorageDir().getCurrentDir(); File previousDir = storage.getSingularStorageDir().getPreviousDir(); PersistentLongFile prevLastPromisedEpoch = new PersistentLongFile( new File(previousDir, LAST_PROMISED_FILENAME), 0); PersistentLongFile prevLastWriterEpoch = new PersistentLongFile( new File(previousDir, LAST_WRITER_EPOCH), 0); BestEffortLongFile prevCommittedTxnId = new BestEffortLongFile( new File(previousDir, COMMITTED_TXID_FILENAME), HdfsServerConstants.INVALID_TXID); lastPromisedEpoch = new PersistentLongFile( new File(currentDir, LAST_PROMISED_FILENAME), 0); lastWriterEpoch = new PersistentLongFile( new File(currentDir, LAST_WRITER_EPOCH), 0); committedTxnId = new BestEffortLongFile( new File(currentDir, COMMITTED_TXID_FILENAME), HdfsServerConstants.INVALID_TXID); try { lastPromisedEpoch.set(prevLastPromisedEpoch.get()); lastWriterEpoch.set(prevLastWriterEpoch.get()); committedTxnId.set(prevCommittedTxnId.get()); } finally { IOUtils.cleanup(LOG, prevCommittedTxnId); } } public synchronized void doFinalize() throws IOException { LOG.info("Finalizing upgrade for journal " + storage.getRoot() + "." 
+ (storage.getLayoutVersion()==0 ? "" : "\n cur LV = " + storage.getLayoutVersion() + "; cur CTime = " + storage.getCTime())); storage.getJournalManager().doFinalize(); } public Boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion) throws IOException { return this.storage.getJournalManager().canRollBack(storage, prevStorage, targetLayoutVersion); } public synchronized void doRollback() throws IOException { // Do not hold file lock on committedTxnId, because the containing // directory will be renamed. It will be reopened lazily on next access. IOUtils.cleanup(LOG, committedTxnId); storage.getJournalManager().doRollback(); } public Long getJournalCTime() throws IOException { return storage.getJournalManager().getJournalCTime(); } }
39,628
35.864186
101
java
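The core fencing rule in Journal above is that newEpoch() and checkRequest() reject any epoch that is not strictly greater than the last promised epoch, and raising the promise fences all earlier writers. The standalone sketch below illustrates just that rule; it is a simplification, not the Hadoop class itself (the real promise is persisted to disk via PersistentLongFile rather than held in a field).

import java.io.IOException;

public class EpochFenceSketch {
  private long lastPromisedEpoch = 0; // persisted on disk in the real Journal

  synchronized void newEpoch(long epoch) throws IOException {
    if (epoch <= lastPromisedEpoch) {
      throw new IOException("Proposed epoch " + epoch
          + " <= last promise " + lastPromisedEpoch);
    }
    lastPromisedEpoch = epoch; // fences any writer with a smaller epoch
  }

  public static void main(String[] args) throws IOException {
    EpochFenceSketch j = new EpochFenceSketch();
    j.newEpoch(1);            // first writer
    j.newEpoch(2);            // newer writer fences the first
    try {
      j.newEpoch(1);          // stale writer is rejected
    } catch (IOException expected) {
      System.out.println(expected.getMessage());
    }
  }
}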
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.server; import java.io.IOException; import java.net.InetSocketAddress; import java.net.URL; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.QJournalProtocolService; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; import org.apache.hadoop.hdfs.qjournal.protocol.RequestInfo; import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolPB; import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolServerSideTranslatorPB; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RPC.Server; import org.apache.hadoop.net.NetUtils; import com.google.protobuf.BlockingService; class JournalNodeRpcServer implements QJournalProtocol { private static final int HANDLER_COUNT = 5; private final JournalNode jn; private Server server; JournalNodeRpcServer(Configuration conf, JournalNode jn) throws IOException { this.jn = jn; Configuration confCopy = new Configuration(conf); // Ensure that nagling doesn't kick in, which could cause latency issues. 
confCopy.setBoolean( CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY, true); InetSocketAddress addr = getAddress(confCopy); RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class, ProtobufRpcEngine.class); QJournalProtocolServerSideTranslatorPB translator = new QJournalProtocolServerSideTranslatorPB(this); BlockingService service = QJournalProtocolService .newReflectiveBlockingService(translator); this.server = new RPC.Builder(confCopy) .setProtocol(QJournalProtocolPB.class) .setInstance(service) .setBindAddress(addr.getHostName()) .setPort(addr.getPort()) .setNumHandlers(HANDLER_COUNT) .setVerbose(false) .build(); // set service-level authorization security policy if (confCopy.getBoolean( CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { server.refreshServiceAcl(confCopy, new HDFSPolicyProvider()); } } void start() { this.server.start(); } public InetSocketAddress getAddress() { return server.getListenerAddress(); } void join() throws InterruptedException { this.server.join(); } void stop() { this.server.stop(); } static InetSocketAddress getAddress(Configuration conf) { String addr = conf.get( DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_DEFAULT); return NetUtils.createSocketAddr(addr, 0, DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY); } @Override public boolean isFormatted(String journalId) throws IOException { return jn.getOrCreateJournal(journalId).isFormatted(); } @SuppressWarnings("deprecation") @Override public GetJournalStateResponseProto getJournalState(String journalId) throws IOException { long epoch = jn.getOrCreateJournal(journalId).getLastPromisedEpoch(); return GetJournalStateResponseProto.newBuilder() .setLastPromisedEpoch(epoch) .setHttpPort(jn.getBoundHttpAddress().getPort()) .setFromURL(jn.getHttpServerURI()) .build(); } @Override public NewEpochResponseProto newEpoch(String journalId, NamespaceInfo nsInfo, long epoch) throws IOException { return jn.getOrCreateJournal(journalId).newEpoch(nsInfo, epoch); } @Override public void format(String journalId, NamespaceInfo nsInfo) throws IOException { jn.getOrCreateJournal(journalId).format(nsInfo); } @Override public void journal(RequestInfo reqInfo, long segmentTxId, long firstTxnId, int numTxns, byte[] records) throws IOException { jn.getOrCreateJournal(reqInfo.getJournalId()) .journal(reqInfo, segmentTxId, firstTxnId, numTxns, records); } @Override public void heartbeat(RequestInfo reqInfo) throws IOException { jn.getOrCreateJournal(reqInfo.getJournalId()) .heartbeat(reqInfo); } @Override public void startLogSegment(RequestInfo reqInfo, long txid, int layoutVersion) throws IOException { jn.getOrCreateJournal(reqInfo.getJournalId()) .startLogSegment(reqInfo, txid, layoutVersion); } @Override public void finalizeLogSegment(RequestInfo reqInfo, long startTxId, long endTxId) throws IOException { jn.getOrCreateJournal(reqInfo.getJournalId()) .finalizeLogSegment(reqInfo, startTxId, endTxId); } @Override public void purgeLogsOlderThan(RequestInfo reqInfo, long minTxIdToKeep) throws IOException { jn.getOrCreateJournal(reqInfo.getJournalId()) .purgeLogsOlderThan(reqInfo, minTxIdToKeep); } @SuppressWarnings("deprecation") @Override public GetEditLogManifestResponseProto getEditLogManifest(String jid, long sinceTxId, boolean inProgressOk) throws IOException { RemoteEditLogManifest manifest = jn.getOrCreateJournal(jid) .getEditLogManifest(sinceTxId, inProgressOk); return GetEditLogManifestResponseProto.newBuilder() .setManifest(PBHelper.convert(manifest)) 
.setHttpPort(jn.getBoundHttpAddress().getPort()) .setFromURL(jn.getHttpServerURI()) .build(); } @Override public PrepareRecoveryResponseProto prepareRecovery(RequestInfo reqInfo, long segmentTxId) throws IOException { return jn.getOrCreateJournal(reqInfo.getJournalId()) .prepareRecovery(reqInfo, segmentTxId); } @Override public void acceptRecovery(RequestInfo reqInfo, SegmentStateProto log, URL fromUrl) throws IOException { jn.getOrCreateJournal(reqInfo.getJournalId()) .acceptRecovery(reqInfo, log, fromUrl); } @Override public void doPreUpgrade(String journalId) throws IOException { jn.doPreUpgrade(journalId); } @Override public void doUpgrade(String journalId, StorageInfo sInfo) throws IOException { jn.doUpgrade(journalId, sInfo); } @Override public void doFinalize(String journalId) throws IOException { jn.doFinalize(journalId); } @Override public Boolean canRollBack(String journalId, StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion) throws IOException { return jn.canRollBack(journalId, storage, prevStorage, targetLayoutVersion); } @Override public void doRollback(String journalId) throws IOException { jn.doRollback(journalId); } @Override public Long getJournalCTime(String journalId) throws IOException { return jn.getJournalCTime(journalId); } @Override public void discardSegments(String journalId, long startTxId) throws IOException { jn.discardSegments(journalId, startTxId); } }
8,559
33.65587
103
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalFaultInjector.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.server; import java.io.IOException; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; /** * Used for injecting faults in QuorumJournalManager tests. * Calls into this are a no-op in production code. */ @VisibleForTesting @InterfaceAudience.Private public class JournalFaultInjector { public static JournalFaultInjector instance = new JournalFaultInjector(); public static JournalFaultInjector get() { return instance; } public void beforePersistPaxosData() throws IOException {} public void afterPersistPaxosData() throws IOException {} }
1,475
34.142857
75
java
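JournalFaultInjector exposes a public static instance precisely so tests can swap in a subclass that throws at the two hook points around persistPaxosData(). A hypothetical injector of that kind is sketched below; the one-shot failure behaviour is illustrative and not taken from the Hadoop test suite.

import java.io.IOException;
import org.apache.hadoop.hdfs.qjournal.server.JournalFaultInjector;

public class FaultInjectionSketch {
  public static void main(String[] args) {
    // Install a hypothetical injector that fails the first persistPaxosData() call only.
    JournalFaultInjector.instance = new JournalFaultInjector() {
      private boolean failed = false;

      @Override
      public void beforePersistPaxosData() throws IOException {
        if (!failed) {
          failed = true;
          throw new IOException("Injected fault before persisting paxos data");
        }
      }
    };
    // A Journal exercised after this point would hit the injected fault once,
    // allowing recovery of the half-done accept to be tested.
  }
}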
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.server; import java.io.IOException; import java.net.InetSocketAddress; import java.net.URI; import javax.servlet.ServletContext; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.net.NetUtils; /** * Encapsulates the HTTP server started by the Journal Service. */ @InterfaceAudience.Private public class JournalNodeHttpServer { public static final String JN_ATTRIBUTE_KEY = "localjournal"; private HttpServer2 httpServer; private final JournalNode localJournalNode; private final Configuration conf; JournalNodeHttpServer(Configuration conf, JournalNode jn) { this.conf = conf; this.localJournalNode = jn; } void start() throws IOException { final InetSocketAddress httpAddr = getAddress(conf); final String httpsAddrString = conf.get( DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT); InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString); HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf, httpAddr, httpsAddr, "journal", DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY); httpServer = builder.build(); httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode); httpServer.setAttribute(JspHelper.CURRENT_CONF, conf); httpServer.addInternalServlet("getJournal", "/getJournal", GetJournalEditServlet.class, true); httpServer.start(); } void stop() throws IOException { if (httpServer != null) { try { httpServer.stop(); } catch (Exception e) { throw new IOException(e); } } } /** * Return the actual address bound to by the running server. */ @Deprecated public InetSocketAddress getAddress() { InetSocketAddress addr = httpServer.getConnectorAddress(0); assert addr.getPort() != 0; return addr; } /** * Return the URI that locates the HTTP server. */ URI getServerURI() { // getHttpClientScheme() only returns https for HTTPS_ONLY policy. This // matches the behavior that the first connector is a HTTPS connector only // for HTTPS_ONLY policy. 
InetSocketAddress addr = httpServer.getConnectorAddress(0); return URI.create(DFSUtil.getHttpClientScheme(conf) + "://" + NetUtils.getHostPortString(addr)); } private static InetSocketAddress getAddress(Configuration conf) { String addr = conf.get(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY, DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT); return NetUtils.createSocketAddr(addr, DFSConfigKeys.DFS_JOURNALNODE_HTTP_PORT_DEFAULT, DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY); } public static Journal getJournalFromContext(ServletContext context, String jid) throws IOException { JournalNode jn = (JournalNode)context.getAttribute(JN_ATTRIBUTE_KEY); return jn.getOrCreateJournal(jid); } public static Configuration getConfFromContext(ServletContext context) { return (Configuration) context.getAttribute(JspHelper.CURRENT_CONF); } }
4,261
33.934426
81
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.qjournal.server; import java.io.IOException; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableQuantiles; /** * The server-side metrics for a journal from the JournalNode's * perspective. */ @Metrics(about="Journal metrics", context="dfs") class JournalMetrics { final MetricsRegistry registry = new MetricsRegistry("JournalNode"); @Metric("Number of batches written since startup") MutableCounterLong batchesWritten; @Metric("Number of txns written since startup") MutableCounterLong txnsWritten; @Metric("Number of bytes written since startup") MutableCounterLong bytesWritten; @Metric("Number of batches written where this node was lagging") MutableCounterLong batchesWrittenWhileLagging; private final int[] QUANTILE_INTERVALS = new int[] { 1*60, // 1m 5*60, // 5m 60*60 // 1h }; final MutableQuantiles[] syncsQuantiles; private final Journal journal; JournalMetrics(Journal journal) { this.journal = journal; syncsQuantiles = new MutableQuantiles[QUANTILE_INTERVALS.length]; for (int i = 0; i < syncsQuantiles.length; i++) { int interval = QUANTILE_INTERVALS[i]; syncsQuantiles[i] = registry.newQuantiles( "syncs" + interval + "s", "Journal sync time", "ops", "latencyMicros", interval); } } public static JournalMetrics create(Journal j) { JournalMetrics m = new JournalMetrics(j); return DefaultMetricsSystem.instance().register( m.getName(), null, m); } String getName() { return "Journal-" + journal.getJournalId(); } @Metric("Current writer's epoch") public long getLastWriterEpoch() { try { return journal.getLastWriterEpoch(); } catch (IOException e) { return -1L; } } @Metric("Last accepted epoch") public long getLastPromisedEpoch() { try { return journal.getLastPromisedEpoch(); } catch (IOException e) { return -1L; } } @Metric("The highest txid stored on this JN") public long getLastWrittenTxId() { return journal.getHighestWrittenTxId(); } @Metric("Number of transactions that this JN is lagging") public long getCurrentLagTxns() { try { return journal.getCurrentLagTxns(); } catch (IOException e) { return -1L; } } void addSync(long us) { for (MutableQuantiles q : syncsQuantiles) { q.add(us); } } }
3,525
28.630252
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/PeerServer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.net; import java.io.Closeable; import org.apache.hadoop.classification.InterfaceAudience; import java.io.IOException; import java.net.SocketTimeoutException; @InterfaceAudience.Private public interface PeerServer extends Closeable { /** * Set the receive buffer size of the PeerServer. * * @param size The receive buffer size. */ public void setReceiveBufferSize(int size) throws IOException; /** * Listens for a connection to be made to this server and accepts * it. The method blocks until a connection is made. * * @exception IOException if an I/O error occurs when waiting for a * connection. * @exception SecurityException if a security manager exists and its * <code>checkAccept</code> method doesn't allow the operation. * @exception SocketTimeoutException if a timeout was previously set and * the timeout has been reached. */ public Peer accept() throws IOException, SocketTimeoutException; /** * @return A string representation of the address we're * listening on. */ public String getListeningString(); /** * Free the resources associated with this peer server. * This normally includes sockets, etc. * * @throws IOException If there is an error closing the PeerServer */ public void close() throws IOException; }
2,242
35.770492
77
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.net; import java.io.IOException; import java.net.SocketTimeoutException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.net.unix.DomainSocket; @InterfaceAudience.Private public class DomainPeerServer implements PeerServer { static final Log LOG = LogFactory.getLog(DomainPeerServer.class); private final DomainSocket sock; DomainPeerServer(DomainSocket sock) { this.sock = sock; } public DomainPeerServer(String path, int port) throws IOException { this(DomainSocket.bindAndListen(DomainSocket.getEffectivePath(path, port))); } public String getBindPath() { return sock.getPath(); } @Override public void setReceiveBufferSize(int size) throws IOException { sock.setAttribute(DomainSocket.RECEIVE_BUFFER_SIZE, size); } @Override public Peer accept() throws IOException, SocketTimeoutException { DomainSocket connSock = sock.accept(); Peer peer = null; boolean success = false; try { peer = new DomainPeer(connSock); success = true; return peer; } finally { if (!success) { if (peer != null) peer.close(); connSock.close(); } } } @Override public String getListeningString() { return "unix:" + sock.getPath(); } @Override public void close() throws IOException { try { sock.close(); } catch (IOException e) { LOG.error("error closing DomainPeerServer: ", e); } } @Override public String toString() { return "DomainPeerServer(" + getListeningString() + ")"; } }
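A hedged accept-loop sketch using only the members shown above. The socket path, buffer size, and single-threaded handling are illustrative choices, and native DomainSocket support (libhadoop) must be available for this to run; a real DataNode derives the path from dfs.domain.socket.path and hands accepted peers to worker threads.

// Illustrative accept loop over a DomainPeerServer.
static void acceptLoop() throws java.io.IOException {
  DomainPeerServer server = new DomainPeerServer("/var/run/hdfs/dn_socket", 0);
  server.setReceiveBufferSize(128 * 1024);
  System.out.println("listening on " + server.getListeningString());
  try {
    while (true) {
      Peer peer = server.accept();   // blocks until a client connects
      System.out.println("accepted " + peer.getRemoteAddressString());
      peer.close();                  // a real server would dispatch the peer instead
    }
  } finally {
    server.close();
  }
}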
2,506
27.488636
80
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeer.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.net;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.channels.ReadableByteChannel;

import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.classification.InterfaceAudience;

/**
 * Represents a peer that we communicate with by using blocking I/O
 * on a UNIX domain socket.
 */
@InterfaceAudience.Private
public class DomainPeer implements Peer {
  private final DomainSocket socket;
  private final OutputStream out;
  private final InputStream in;
  private final ReadableByteChannel channel;

  public DomainPeer(DomainSocket socket) {
    this.socket = socket;
    this.out = socket.getOutputStream();
    this.in = socket.getInputStream();
    this.channel = socket.getChannel();
  }

  @Override
  public ReadableByteChannel getInputStreamChannel() {
    return channel;
  }

  @Override
  public void setReadTimeout(int timeoutMs) throws IOException {
    socket.setAttribute(DomainSocket.RECEIVE_TIMEOUT, timeoutMs);
  }

  @Override
  public int getReceiveBufferSize() throws IOException {
    return socket.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
  }

  @Override
  public boolean getTcpNoDelay() throws IOException {
    /* No TCP, no TCP_NODELAY. */
    return false;
  }

  @Override
  public void setWriteTimeout(int timeoutMs) throws IOException {
    socket.setAttribute(DomainSocket.SEND_TIMEOUT, timeoutMs);
  }

  @Override
  public boolean isClosed() {
    return !socket.isOpen();
  }

  @Override
  public void close() throws IOException {
    socket.close();
  }

  @Override
  public String getRemoteAddressString() {
    return "unix:" + socket.getPath();
  }

  @Override
  public String getLocalAddressString() {
    return "<local>";
  }

  @Override
  public InputStream getInputStream() throws IOException {
    return in;
  }

  @Override
  public OutputStream getOutputStream() throws IOException {
    return out;
  }

  @Override
  public boolean isLocal() {
    /* UNIX domain sockets can only be used for local communication. */
    return true;
  }

  @Override
  public String toString() {
    return "DomainPeer(" + getRemoteAddressString() + ")";
  }

  @Override
  public DomainSocket getDomainSocket() {
    return socket;
  }

  @Override
  public boolean hasSecureChannel() {
    //
    // Communication over domain sockets is assumed to be secure, since it
    // doesn't pass over any network.  We also carefully control the privileges
    // that can be used on the domain socket inode and its parent directories.
    // See #{java.org.apache.hadoop.net.unix.DomainSocket#validateSocketPathSecurity0}
    // for details.
    //
    // So unless you are running as root or the hdfs superuser, you cannot
    // launch a man-in-the-middle attack on UNIX domain socket traffic.
    //
    return true;
  }
}
3,671
26.609023
86
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/Peer.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.net;

import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.channels.ReadableByteChannel;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.net.unix.DomainSocket;

/**
 * Represents a connection to a peer.
 */
@InterfaceAudience.Private
public interface Peer extends Closeable {
  /**
   * @return The input stream channel associated with this
   *         peer, or null if it has none.
   */
  public ReadableByteChannel getInputStreamChannel();

  /**
   * Set the read timeout on this peer.
   *
   * @param timeoutMs The timeout in milliseconds.
   */
  public void setReadTimeout(int timeoutMs) throws IOException;

  /**
   * @return The receive buffer size.
   */
  public int getReceiveBufferSize() throws IOException;

  /**
   * @return True if TCP_NODELAY is turned on.
   */
  public boolean getTcpNoDelay() throws IOException;

  /**
   * Set the write timeout on this peer.
   *
   * Note: this is not honored for BasicInetPeer.
   * See {@link BasicInetPeer#setWriteTimeout} for details.
   *
   * @param timeoutMs The timeout in milliseconds.
   */
  public void setWriteTimeout(int timeoutMs) throws IOException;

  /**
   * @return true only if the peer is closed.
   */
  public boolean isClosed();

  /**
   * Close the peer.
   *
   * It's safe to re-close a Peer that is already closed.
   */
  public void close() throws IOException;

  /**
   * @return A string representing the remote end of our
   *         connection to the peer.
   */
  public String getRemoteAddressString();

  /**
   * @return A string representing the local end of our
   *         connection to the peer.
   */
  public String getLocalAddressString();

  /**
   * @return An InputStream associated with the Peer.
   *         This InputStream will be valid until you close
   *         this peer with Peer#close.
   */
  public InputStream getInputStream() throws IOException;

  /**
   * @return An OutputStream associated with the Peer.
   *         This OutputStream will be valid until you close
   *         this peer with Peer#close.
   */
  public OutputStream getOutputStream() throws IOException;

  /**
   * @return True if the peer resides on the same
   *         computer as this process.
   */
  public boolean isLocal();

  /**
   * @return The DomainSocket associated with the current
   *         peer, or null if there is none.
   */
  public DomainSocket getDomainSocket();

  /**
   * Return true if the channel is secure.
   *
   * @return True if our channel to this peer is not
   *         susceptible to man-in-the-middle attacks.
   */
  public boolean hasSecureChannel();
}
3,861
30.145161
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.net; import java.io.IOException; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; import java.net.SocketTimeoutException; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.token.Token; @InterfaceAudience.Private public class TcpPeerServer implements PeerServer { static final Log LOG = LogFactory.getLog(TcpPeerServer.class); private final ServerSocket serverSocket; public static Peer peerFromSocket(Socket socket) throws IOException { Peer peer = null; boolean success = false; try { // TCP_NODELAY is crucial here because of bad interactions between // Nagle's Algorithm and Delayed ACKs. With connection keepalive // between the client and DN, the conversation looks like: // 1. Client -> DN: Read block X // 2. DN -> Client: data for block X // 3. Client -> DN: Status OK (successful read) // 4. Client -> DN: Read block Y // The fact that step #3 and #4 are both in the client->DN direction // triggers Nagling. If the DN is using delayed ACKs, this results // in a delay of 40ms or more. // // TCP_NODELAY disables nagling and thus avoids this performance // disaster. socket.setTcpNoDelay(true); SocketChannel channel = socket.getChannel(); if (channel == null) { peer = new BasicInetPeer(socket); } else { peer = new NioInetPeer(socket); } success = true; return peer; } finally { if (!success) { if (peer != null) peer.close(); socket.close(); } } } public static Peer peerFromSocketAndKey( SaslDataTransferClient saslClient, Socket s, DataEncryptionKeyFactory keyFactory, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException { Peer peer = null; boolean success = false; try { peer = peerFromSocket(s); peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId); success = true; return peer; } finally { if (!success) { IOUtils.cleanup(null, peer); } } } /** * Create a non-secure TcpPeerServer. * * @param socketWriteTimeout The Socket write timeout in ms. * @param bindAddr The address to bind to. 
* @throws IOException */ public TcpPeerServer(int socketWriteTimeout, InetSocketAddress bindAddr) throws IOException { this.serverSocket = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket(); Server.bind(serverSocket, bindAddr, 0); } /** * Create a secure TcpPeerServer. * * @param secureResources Security resources. */ public TcpPeerServer(SecureResources secureResources) { this.serverSocket = secureResources.getStreamingSocket(); } /** * @return the IP address which this TcpPeerServer is listening on. */ public InetSocketAddress getStreamingAddr() { return new InetSocketAddress( serverSocket.getInetAddress().getHostAddress(), serverSocket.getLocalPort()); } @Override public void setReceiveBufferSize(int size) throws IOException { this.serverSocket.setReceiveBufferSize(size); } @Override public Peer accept() throws IOException, SocketTimeoutException { Peer peer = peerFromSocket(serverSocket.accept()); return peer; } @Override public String getListeningString() { return serverSocket.getLocalSocketAddress().toString(); } @Override public void close() throws IOException { try { serverSocket.close(); } catch(IOException e) { LOG.error("error closing TcpPeerServer: ", e); } } @Override public String toString() { return "TcpPeerServer(" + getListeningString() + ")"; } }
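A hedged sketch of the non-secure code path above: bind on an ephemeral port, accept one connection, and observe that peerFromSocket has already applied the TCP_NODELAY setting discussed in the comment. The loopback address and buffer size are illustrative values only.

// Sketch only: binds a non-secure TcpPeerServer and accepts a single peer.
static void acceptOnePeer() throws java.io.IOException {
  java.net.InetSocketAddress bindAddr =
      new java.net.InetSocketAddress("127.0.0.1", 0);   // ephemeral port
  TcpPeerServer server = new TcpPeerServer(0 /* socketWriteTimeout */, bindAddr);
  server.setReceiveBufferSize(128 * 1024);
  System.out.println("streaming address: " + server.getStreamingAddr());
  try {
    Peer peer = server.accept();   // wraps the accepted Socket via peerFromSocket()
    System.out.println("TCP_NODELAY on accepted peer: " + peer.getTcpNoDelay());
    peer.close();
  } finally {
    server.close();
  }
}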
5,330
31.907407
84
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.net; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.Socket; import java.nio.channels.ReadableByteChannel; import org.apache.hadoop.net.unix.DomainSocket; /** * Represents a peer that we communicate with by using a basic Socket * that has no associated Channel. * */ class BasicInetPeer implements Peer { private final Socket socket; private final OutputStream out; private final InputStream in; private final boolean isLocal; public BasicInetPeer(Socket socket) throws IOException { this.socket = socket; this.out = socket.getOutputStream(); this.in = socket.getInputStream(); this.isLocal = socket.getInetAddress().equals(socket.getLocalAddress()); } @Override public ReadableByteChannel getInputStreamChannel() { /* * This Socket has no channel, so there's nothing to return here. */ return null; } @Override public void setReadTimeout(int timeoutMs) throws IOException { socket.setSoTimeout(timeoutMs); } @Override public int getReceiveBufferSize() throws IOException { return socket.getReceiveBufferSize(); } @Override public boolean getTcpNoDelay() throws IOException { return socket.getTcpNoDelay(); } @Override public void setWriteTimeout(int timeoutMs) { /* * We can't implement write timeouts. :( * * Java provides no facility to set a blocking write timeout on a Socket. * You can simulate a blocking write with a timeout by using * non-blocking I/O. However, we can't use nio here, because this Socket * doesn't have an associated Channel. * * See http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4031100 for * more details. */ } @Override public boolean isClosed() { return socket.isClosed(); } @Override public void close() throws IOException { socket.close(); } @Override public String getRemoteAddressString() { return socket.getRemoteSocketAddress().toString(); } @Override public String getLocalAddressString() { return socket.getLocalSocketAddress().toString(); } @Override public InputStream getInputStream() throws IOException { return in; } @Override public OutputStream getOutputStream() throws IOException { return out; } @Override public boolean isLocal() { return isLocal; } @Override public String toString() { return "BasicInetPeer(" + socket.toString() + ")"; } @Override public DomainSocket getDomainSocket() { return null; } @Override public boolean hasSecureChannel() { return false; } }
3,473
24.925373
76
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.net; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.Socket; import java.nio.channels.ReadableByteChannel; import org.apache.hadoop.net.SocketInputStream; import org.apache.hadoop.net.SocketOutputStream; import org.apache.hadoop.net.unix.DomainSocket; /** * Represents a peer that we communicate with by using non-blocking I/O * on a Socket. */ class NioInetPeer implements Peer { private final Socket socket; /** * An InputStream which simulates blocking I/O with timeouts using NIO. */ private final SocketInputStream in; /** * An OutputStream which simulates blocking I/O with timeouts using NIO. */ private final SocketOutputStream out; private final boolean isLocal; NioInetPeer(Socket socket) throws IOException { this.socket = socket; this.in = new SocketInputStream(socket.getChannel(), 0); this.out = new SocketOutputStream(socket.getChannel(), 0); this.isLocal = socket.getInetAddress().equals(socket.getLocalAddress()); } @Override public ReadableByteChannel getInputStreamChannel() { return in; } @Override public void setReadTimeout(int timeoutMs) throws IOException { in.setTimeout(timeoutMs); } @Override public int getReceiveBufferSize() throws IOException { return socket.getReceiveBufferSize(); } @Override public boolean getTcpNoDelay() throws IOException { return socket.getTcpNoDelay(); } @Override public void setWriteTimeout(int timeoutMs) throws IOException { out.setTimeout(timeoutMs); } @Override public boolean isClosed() { return socket.isClosed(); } @Override public void close() throws IOException { // We always close the outermost streams-- in this case, 'in' and 'out' // Closing either one of these will also close the Socket. try { in.close(); } finally { out.close(); } } @Override public String getRemoteAddressString() { return socket.getRemoteSocketAddress().toString(); } @Override public String getLocalAddressString() { return socket.getLocalSocketAddress().toString(); } @Override public InputStream getInputStream() throws IOException { return in; } @Override public OutputStream getOutputStream() throws IOException { return out; } @Override public boolean isLocal() { return isLocal; } @Override public String toString() { return "NioInetPeer(" + socket.toString() + ")"; } @Override public DomainSocket getDomainSocket() { return null; } @Override public boolean hasSecureChannel() { return false; } }
3,490
24.481752
76
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.net; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.net.unix.DomainSocket; import java.io.InputStream; import java.io.OutputStream; import java.nio.channels.ReadableByteChannel; /** * Represents a peer that we communicate with by using an encrypted * communications medium. */ @InterfaceAudience.Private public class EncryptedPeer implements Peer { private final Peer enclosedPeer; /** * An encrypted InputStream. */ private final InputStream in; /** * An encrypted OutputStream. */ private final OutputStream out; /** * An encrypted ReadableByteChannel. */ private final ReadableByteChannel channel; public EncryptedPeer(Peer enclosedPeer, IOStreamPair ios) { this.enclosedPeer = enclosedPeer; this.in = ios.in; this.out = ios.out; this.channel = ios.in instanceof ReadableByteChannel ? (ReadableByteChannel)ios.in : null; } @Override public ReadableByteChannel getInputStreamChannel() { return channel; } @Override public void setReadTimeout(int timeoutMs) throws IOException { enclosedPeer.setReadTimeout(timeoutMs); } @Override public int getReceiveBufferSize() throws IOException { return enclosedPeer.getReceiveBufferSize(); } @Override public boolean getTcpNoDelay() throws IOException { return enclosedPeer.getTcpNoDelay(); } @Override public void setWriteTimeout(int timeoutMs) throws IOException { enclosedPeer.setWriteTimeout(timeoutMs); } @Override public boolean isClosed() { return enclosedPeer.isClosed(); } @Override public void close() throws IOException { try { in.close(); } finally { try { out.close(); } finally { enclosedPeer.close(); } } } @Override public String getRemoteAddressString() { return enclosedPeer.getRemoteAddressString(); } @Override public String getLocalAddressString() { return enclosedPeer.getLocalAddressString(); } @Override public InputStream getInputStream() throws IOException { return in; } @Override public OutputStream getOutputStream() throws IOException { return out; } @Override public boolean isLocal() { return enclosedPeer.isLocal(); } @Override public String toString() { return "EncryptedPeer(" + enclosedPeer + ")"; } @Override public DomainSocket getDomainSocket() { return enclosedPeer.getDomainSocket(); } @Override public boolean hasSecureChannel() { return true; } }
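A hedged sketch of how the wrapper is applied. The enclosed peer and the negotiated IOStreamPair are assumed to come from an earlier data-transfer encryption handshake that is not reproduced here; the payload byte is purely illustrative.

// Sketch only: wrap an already-negotiated stream pair around an existing peer.
static Peer wrapEncrypted(Peer basePeer, IOStreamPair negotiatedStreams)
    throws java.io.IOException {
  Peer securePeer = new EncryptedPeer(basePeer, negotiatedStreams);
  // All further traffic must go through the wrapper's encrypted streams.
  java.io.OutputStream out = securePeer.getOutputStream();
  out.write(new byte[] { 0x1 });   // illustrative payload byte
  out.flush();
  System.out.println("secure channel: " + securePeer.hasSecureChannel()); // true
  return securePeer;
}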
3,488
23.398601
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.mover; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.commons.cli.*; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.*; import org.apache.hadoop.hdfs.server.balancer.Dispatcher; import org.apache.hadoop.hdfs.server.balancer.Dispatcher.*; import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup; import org.apache.hadoop.hdfs.server.balancer.ExitStatus; import org.apache.hadoop.hdfs.server.balancer.Matcher; import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; import java.net.URI; import java.text.DateFormat; import java.util.*; import java.util.concurrent.atomic.AtomicInteger; @InterfaceAudience.Private public class Mover { static final Log LOG = LogFactory.getLog(Mover.class); static final Path MOVER_ID_PATH = new Path("/system/mover.id"); private static class StorageMap { private final StorageGroupMap<Source> sources = new StorageGroupMap<Source>(); private final StorageGroupMap<StorageGroup> targets = new StorageGroupMap<StorageGroup>(); private final EnumMap<StorageType, List<StorageGroup>> targetStorageTypeMap = new EnumMap<StorageType, List<StorageGroup>>(StorageType.class); private StorageMap() { for(StorageType t : StorageType.getMovableTypes()) { targetStorageTypeMap.put(t, new LinkedList<StorageGroup>()); } } private void add(Source source, StorageGroup target) { sources.put(source); if (target != null) { targets.put(target); getTargetStorages(target.getStorageType()).add(target); } } private Source getSource(MLocation ml) { return get(sources, ml); } private 
StorageGroup getTarget(String uuid, StorageType storageType) { return targets.get(uuid, storageType); } private static <G extends StorageGroup> G get(StorageGroupMap<G> map, MLocation ml) { return map.get(ml.datanode.getDatanodeUuid(), ml.storageType); } private List<StorageGroup> getTargetStorages(StorageType t) { return targetStorageTypeMap.get(t); } } private final Dispatcher dispatcher; private final StorageMap storages; private final List<Path> targetPaths; private final int retryMaxAttempts; private final AtomicInteger retryCount; private final BlockStoragePolicy[] blockStoragePolicies; Mover(NameNodeConnector nnc, Configuration conf, AtomicInteger retryCount) { final long movedWinWidth = conf.getLong( DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_DEFAULT); final int moverThreads = conf.getInt( DFSConfigKeys.DFS_MOVER_MOVERTHREADS_KEY, DFSConfigKeys.DFS_MOVER_MOVERTHREADS_DEFAULT); final int maxConcurrentMovesPerNode = conf.getInt( DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT); this.retryMaxAttempts = conf.getInt( DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT); this.retryCount = retryCount; this.dispatcher = new Dispatcher(nnc, Collections.<String> emptySet(), Collections.<String> emptySet(), movedWinWidth, moverThreads, 0, maxConcurrentMovesPerNode, conf); this.storages = new StorageMap(); this.targetPaths = nnc.getTargetPaths(); this.blockStoragePolicies = new BlockStoragePolicy[1 << BlockStoragePolicySuite.ID_BIT_LENGTH]; } void init() throws IOException { initStoragePolicies(); final List<DatanodeStorageReport> reports = dispatcher.init(); for(DatanodeStorageReport r : reports) { final DDatanode dn = dispatcher.newDatanode(r.getDatanodeInfo()); for(StorageType t : StorageType.getMovableTypes()) { final Source source = dn.addSource(t, Long.MAX_VALUE, dispatcher); final long maxRemaining = getMaxRemaining(r, t); final StorageGroup target = maxRemaining > 0L ? dn.addTarget(t, maxRemaining) : null; storages.add(source, target); } } } private void initStoragePolicies() throws IOException { Collection<BlockStoragePolicy> policies = dispatcher.getDistributedFileSystem().getAllStoragePolicies(); for (BlockStoragePolicy policy : policies) { this.blockStoragePolicies[policy.getId()] = policy; } } private ExitStatus run() { try { init(); return new Processor().processNamespace().getExitStatus(); } catch (IllegalArgumentException e) { System.out.println(e + ". Exiting ..."); return ExitStatus.ILLEGAL_ARGUMENTS; } catch (IOException e) { System.out.println(e + ". Exiting ..."); return ExitStatus.IO_EXCEPTION; } finally { dispatcher.shutdownNow(); } } DBlock newDBlock(Block block, List<MLocation> locations) { final DBlock db = new DBlock(block); for(MLocation ml : locations) { StorageGroup source = storages.getSource(ml); if (source != null) { db.addLocation(source); } } return db; } private static long getMaxRemaining(DatanodeStorageReport report, StorageType t) { long max = 0L; for(StorageReport r : report.getStorageReports()) { if (r.getStorage().getStorageType() == t) { if (r.getRemaining() > max) { max = r.getRemaining(); } } } return max; } /** * convert a snapshot path to non-snapshot path. 
E.g., * /foo/.snapshot/snapshot-name/bar --> /foo/bar */ private static String convertSnapshotPath(String[] pathComponents) { StringBuilder sb = new StringBuilder(Path.SEPARATOR); for (int i = 0; i < pathComponents.length; i++) { if (pathComponents[i].equals(HdfsConstants.DOT_SNAPSHOT_DIR)) { i++; } else { sb.append(pathComponents[i]); } } return sb.toString(); } class Processor { private final DFSClient dfs; private final List<String> snapshottableDirs = new ArrayList<String>(); Processor() { dfs = dispatcher.getDistributedFileSystem().getClient(); } private void getSnapshottableDirs() { SnapshottableDirectoryStatus[] dirs = null; try { dirs = dfs.getSnapshottableDirListing(); } catch (IOException e) { LOG.warn("Failed to get snapshottable directories." + " Ignore and continue.", e); } if (dirs != null) { for (SnapshottableDirectoryStatus dir : dirs) { snapshottableDirs.add(dir.getFullPath().toString()); } } } /** * @return true if the given path is a snapshot path and the corresponding * INode is still in the current fsdirectory. */ private boolean isSnapshotPathInCurrent(String path) throws IOException { // if the parent path contains "/.snapshot/", this is a snapshot path if (path.contains(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR)) { String[] pathComponents = INode.getPathNames(path); if (HdfsConstants.DOT_SNAPSHOT_DIR .equals(pathComponents[pathComponents.length - 2])) { // this is a path for a specific snapshot (e.g., /foo/.snapshot/s1) return false; } String nonSnapshotPath = convertSnapshotPath(pathComponents); return dfs.getFileInfo(nonSnapshotPath) != null; } else { return false; } } /** * @return whether there is still remaining migration work for the next * round */ private Result processNamespace() throws IOException { getSnapshottableDirs(); Result result = new Result(); for (Path target : targetPaths) { processPath(target.toUri().getPath(), result); } // wait for pending move to finish and retry the failed migration boolean hasFailed = Dispatcher.waitForMoveCompletion(storages.targets .values()); boolean hasSuccess = Dispatcher.checkForSuccess(storages.targets .values()); if (hasFailed && !hasSuccess) { if (retryCount.get() == retryMaxAttempts) { result.setRetryFailed(); LOG.error("Failed to move some block's after " + retryMaxAttempts + " retries."); return result; } else { retryCount.incrementAndGet(); } } else { // Reset retry count if no failure. retryCount.set(0); } result.updateHasRemaining(hasFailed); return result; } /** * @return whether there is still remaing migration work for the next * round */ private void processPath(String fullPath, Result result) { for (byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;;) { final DirectoryListing children; try { children = dfs.listPaths(fullPath, lastReturnedName, true); } catch(IOException e) { LOG.warn("Failed to list directory " + fullPath + ". 
Ignore the directory and continue.", e); return; } if (children == null) { return; } for (HdfsFileStatus child : children.getPartialListing()) { processRecursively(fullPath, child, result); } if (children.hasMore()) { lastReturnedName = children.getLastName(); } else { return; } } } /** @return whether the migration requires next round */ private void processRecursively(String parent, HdfsFileStatus status, Result result) { String fullPath = status.getFullName(parent); if (status.isDir()) { if (!fullPath.endsWith(Path.SEPARATOR)) { fullPath = fullPath + Path.SEPARATOR; } processPath(fullPath, result); // process snapshots if this is a snapshottable directory if (snapshottableDirs.contains(fullPath)) { final String dirSnapshot = fullPath + HdfsConstants.DOT_SNAPSHOT_DIR; processPath(dirSnapshot, result); } } else if (!status.isSymlink()) { // file try { if (!isSnapshotPathInCurrent(fullPath)) { // the full path is a snapshot path but it is also included in the // current directory tree, thus ignore it. processFile(fullPath, (HdfsLocatedFileStatus) status, result); } } catch (IOException e) { LOG.warn("Failed to check the status of " + parent + ". Ignore it and continue.", e); } } } /** @return true if it is necessary to run another round of migration */ private void processFile(String fullPath, HdfsLocatedFileStatus status, Result result) { final byte policyId = status.getStoragePolicy(); // currently we ignore files with unspecified storage policy if (policyId == HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) { return; } final BlockStoragePolicy policy = blockStoragePolicies[policyId]; if (policy == null) { LOG.warn("Failed to get the storage policy of file " + fullPath); return; } final List<StorageType> types = policy.chooseStorageTypes( status.getReplication()); final LocatedBlocks locatedBlocks = status.getBlockLocations(); final boolean lastBlkComplete = locatedBlocks.isLastBlockComplete(); List<LocatedBlock> lbs = locatedBlocks.getLocatedBlocks(); for (int i = 0; i < lbs.size(); i++) { if (i == lbs.size() - 1 && !lastBlkComplete) { // last block is incomplete, skip it continue; } LocatedBlock lb = lbs.get(i); final StorageTypeDiff diff = new StorageTypeDiff(types, lb.getStorageTypes()); if (!diff.removeOverlap(true)) { if (scheduleMoves4Block(diff, lb)) { result.updateHasRemaining(diff.existing.size() > 1 && diff.expected.size() > 1); // One block scheduled successfully, set noBlockMoved to false result.setNoBlockMoved(false); } else { result.updateHasRemaining(true); } } } } boolean scheduleMoves4Block(StorageTypeDiff diff, LocatedBlock lb) { final List<MLocation> locations = MLocation.toLocations(lb); Collections.shuffle(locations); final DBlock db = newDBlock(lb.getBlock().getLocalBlock(), locations); for (final StorageType t : diff.existing) { for (final MLocation ml : locations) { final Source source = storages.getSource(ml); if (ml.storageType == t && source != null) { // try to schedule one replica move. if (scheduleMoveReplica(db, source, diff.expected)) { return true; } } } } return false; } @VisibleForTesting boolean scheduleMoveReplica(DBlock db, MLocation ml, List<StorageType> targetTypes) { final Source source = storages.getSource(ml); return source == null ? 
false : scheduleMoveReplica(db, source, targetTypes); } boolean scheduleMoveReplica(DBlock db, Source source, List<StorageType> targetTypes) { // Match storage on the same node if (chooseTargetInSameNode(db, source, targetTypes)) { return true; } if (dispatcher.getCluster().isNodeGroupAware()) { if (chooseTarget(db, source, targetTypes, Matcher.SAME_NODE_GROUP)) { return true; } } // Then, match nodes on the same rack if (chooseTarget(db, source, targetTypes, Matcher.SAME_RACK)) { return true; } // At last, match all remaining nodes return chooseTarget(db, source, targetTypes, Matcher.ANY_OTHER); } /** * Choose the target storage within same Datanode if possible. */ boolean chooseTargetInSameNode(DBlock db, Source source, List<StorageType> targetTypes) { for (StorageType t : targetTypes) { StorageGroup target = storages.getTarget(source.getDatanodeInfo() .getDatanodeUuid(), t); if (target == null) { continue; } final PendingMove pm = source.addPendingMove(db, target); if (pm != null) { dispatcher.executePendingMove(pm); return true; } } return false; } boolean chooseTarget(DBlock db, Source source, List<StorageType> targetTypes, Matcher matcher) { final NetworkTopology cluster = dispatcher.getCluster(); for (StorageType t : targetTypes) { for(StorageGroup target : storages.getTargetStorages(t)) { if (matcher.match(cluster, source.getDatanodeInfo(), target.getDatanodeInfo())) { final PendingMove pm = source.addPendingMove(db, target); if (pm != null) { dispatcher.executePendingMove(pm); return true; } } } } return false; } } static class MLocation { final DatanodeInfo datanode; final StorageType storageType; final long size; MLocation(DatanodeInfo datanode, StorageType storageType, long size) { this.datanode = datanode; this.storageType = storageType; this.size = size; } static List<MLocation> toLocations(LocatedBlock lb) { final DatanodeInfo[] datanodeInfos = lb.getLocations(); final StorageType[] storageTypes = lb.getStorageTypes(); final long size = lb.getBlockSize(); final List<MLocation> locations = new LinkedList<MLocation>(); for(int i = 0; i < datanodeInfos.length; i++) { locations.add(new MLocation(datanodeInfos[i], storageTypes[i], size)); } return locations; } } @VisibleForTesting static class StorageTypeDiff { final List<StorageType> expected; final List<StorageType> existing; StorageTypeDiff(List<StorageType> expected, StorageType[] existing) { this.expected = new LinkedList<StorageType>(expected); this.existing = new LinkedList<StorageType>(Arrays.asList(existing)); } /** * Remove the overlap between the expected types and the existing types. * @param ignoreNonMovable ignore non-movable storage types * by removing them from both expected and existing storage type list * to prevent non-movable storage from being moved. * @returns if the existing types or the expected types is empty after * removing the overlap. 
*/ boolean removeOverlap(boolean ignoreNonMovable) { for(Iterator<StorageType> i = existing.iterator(); i.hasNext(); ) { final StorageType t = i.next(); if (expected.remove(t)) { i.remove(); } } if (ignoreNonMovable) { removeNonMovable(existing); removeNonMovable(expected); } return expected.isEmpty() || existing.isEmpty(); } void removeNonMovable(List<StorageType> types) { for (Iterator<StorageType> i = types.iterator(); i.hasNext(); ) { final StorageType t = i.next(); if (!t.isMovable()) { i.remove(); } } } @Override public String toString() { return getClass().getSimpleName() + "{expected=" + expected + ", existing=" + existing + "}"; } } static int run(Map<URI, List<Path>> namenodes, Configuration conf) throws IOException, InterruptedException { final long sleeptime = conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 2000 + conf.getLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000; AtomicInteger retryCount = new AtomicInteger(0); LOG.info("namenodes = " + namenodes); List<NameNodeConnector> connectors = Collections.emptyList(); try { connectors = NameNodeConnector.newNameNodeConnectors(namenodes, Mover.class.getSimpleName(), MOVER_ID_PATH, conf, NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS); while (connectors.size() > 0) { Collections.shuffle(connectors); Iterator<NameNodeConnector> iter = connectors.iterator(); while (iter.hasNext()) { NameNodeConnector nnc = iter.next(); final Mover m = new Mover(nnc, conf, retryCount); final ExitStatus r = m.run(); if (r == ExitStatus.SUCCESS) { IOUtils.cleanup(LOG, nnc); iter.remove(); } else if (r != ExitStatus.IN_PROGRESS) { // must be an error statue, return return r.getExitCode(); } } Thread.sleep(sleeptime); } return ExitStatus.SUCCESS.getExitCode(); } finally { for (NameNodeConnector nnc : connectors) { IOUtils.cleanup(LOG, nnc); } } } static class Cli extends Configured implements Tool { private static final String USAGE = "Usage: hdfs mover " + "[-p <files/dirs> | -f <local file>]" + "\n\t-p <files/dirs>\ta space separated list of HDFS files/dirs to migrate." 
+ "\n\t-f <local file>\ta local file containing a list of HDFS files/dirs to migrate."; private static Options buildCliOptions() { Options opts = new Options(); Option file = OptionBuilder.withArgName("pathsFile").hasArg() .withDescription("a local file containing files/dirs to migrate") .create("f"); Option paths = OptionBuilder.withArgName("paths").hasArgs() .withDescription("specify space separated files/dirs to migrate") .create("p"); OptionGroup group = new OptionGroup(); group.addOption(file); group.addOption(paths); opts.addOptionGroup(group); return opts; } private static String[] readPathFile(String file) throws IOException { List<String> list = Lists.newArrayList(); BufferedReader reader = new BufferedReader( new InputStreamReader(new FileInputStream(file), "UTF-8")); try { String line; while ((line = reader.readLine()) != null) { if (!line.trim().isEmpty()) { list.add(line); } } } finally { IOUtils.cleanup(LOG, reader); } return list.toArray(new String[list.size()]); } private static Map<URI, List<Path>> getNameNodePaths(CommandLine line, Configuration conf) throws Exception { Map<URI, List<Path>> map = Maps.newHashMap(); String[] paths = null; if (line.hasOption("f")) { paths = readPathFile(line.getOptionValue("f")); } else if (line.hasOption("p")) { paths = line.getOptionValues("p"); } Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); if (paths == null || paths.length == 0) { for (URI namenode : namenodes) { map.put(namenode, null); } return map; } final URI singleNs = namenodes.size() == 1 ? namenodes.iterator().next() : null; for (String path : paths) { Path target = new Path(path); if (!target.isUriPathAbsolute()) { throw new IllegalArgumentException("The path " + target + " is not absolute"); } URI targetUri = target.toUri(); if ((targetUri.getAuthority() == null || targetUri.getScheme() == null) && singleNs == null) { // each path must contains both scheme and authority information // unless there is only one name service specified in the // configuration throw new IllegalArgumentException("The path " + target + " does not contain scheme and authority thus cannot identify" + " its name service"); } URI key = singleNs; if (singleNs == null) { key = new URI(targetUri.getScheme(), targetUri.getAuthority(), null, null, null); if (!namenodes.contains(key)) { throw new IllegalArgumentException("Cannot resolve the path " + target + ". The namenode services specified in the " + "configuration: " + namenodes); } } List<Path> targets = map.get(key); if (targets == null) { targets = Lists.newArrayList(); map.put(key, targets); } targets.add(Path.getPathWithoutSchemeAndAuthority(target)); } return map; } @VisibleForTesting static Map<URI, List<Path>> getNameNodePathsToMove(Configuration conf, String... args) throws Exception { final Options opts = buildCliOptions(); CommandLineParser parser = new GnuParser(); CommandLine commandLine = parser.parse(opts, args, true); return getNameNodePaths(commandLine, conf); } @Override public int run(String[] args) throws Exception { final long startTime = Time.monotonicNow(); final Configuration conf = getConf(); try { final Map<URI, List<Path>> map = getNameNodePathsToMove(conf, args); return Mover.run(map, conf); } catch (IOException e) { System.out.println(e + ". Exiting ..."); return ExitStatus.IO_EXCEPTION.getExitCode(); } catch (InterruptedException e) { System.out.println(e + ". Exiting ..."); return ExitStatus.INTERRUPTED.getExitCode(); } catch (ParseException e) { System.out.println(e + ". 
Exiting ..."); return ExitStatus.ILLEGAL_ARGUMENTS.getExitCode(); } catch (IllegalArgumentException e) { System.out.println(e + ". Exiting ..."); return ExitStatus.ILLEGAL_ARGUMENTS.getExitCode(); } finally { System.out.format("%-24s ", DateFormat.getDateTimeInstance().format(new Date())); System.out.println("Mover took " + StringUtils.formatTime(Time.monotonicNow()-startTime)); } } } private static class Result { private boolean hasRemaining; private boolean noBlockMoved; private boolean retryFailed; Result() { hasRemaining = false; noBlockMoved = true; retryFailed = false; } boolean isHasRemaining() { return hasRemaining; } boolean isNoBlockMoved() { return noBlockMoved; } void updateHasRemaining(boolean hasRemaining) { this.hasRemaining |= hasRemaining; } void setNoBlockMoved(boolean noBlockMoved) { this.noBlockMoved = noBlockMoved; } void setRetryFailed() { this.retryFailed = true; } /** * @return NO_MOVE_PROGRESS if no progress in move after some retry. Return * SUCCESS if all moves are success and there is no remaining move. * Return NO_MOVE_BLOCK if there moves available but all the moves * cannot be scheduled. Otherwise, return IN_PROGRESS since there * must be some remaining moves. */ ExitStatus getExitStatus() { if (retryFailed) { return ExitStatus.NO_MOVE_PROGRESS; } else { return !isHasRemaining() ? ExitStatus.SUCCESS : isNoBlockMoved() ? ExitStatus.NO_MOVE_BLOCK : ExitStatus.IN_PROGRESS; } } } /** * Run a Mover in command line. * * @param args Command line arguments */ public static void main(String[] args) { if (DFSUtil.parseHelpArgument(args, Cli.USAGE, System.out, true)) { System.exit(0); } try { System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args)); } catch (Throwable e) { LOG.error("Exiting " + Mover.class.getSimpleName() + " due to an exception", e); System.exit(-1); } } }
27,932
34.628827
98
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageErrorReporter.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.common; import java.io.File; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.server.namenode.JournalManager; /** * Interface which implementations of {@link JournalManager} can use to report * errors on underlying storage directories. This avoids a circular dependency * between journal managers and the storage which instantiates them. */ @InterfaceAudience.Private public interface StorageErrorReporter { /** * Indicate that some error occurred on the given file. * * @param f the file which had an error. */ public void reportErrorOnFile(File f); }
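A minimal hedged implementation sketch of the interface; it only records the failing file, whereas real HDFS implementers additionally take the affected storage directory out of service.

// Minimal illustrative implementation: it only remembers which files failed.
class RecordingErrorReporter implements StorageErrorReporter {
  private final java.util.List<java.io.File> failedFiles =
      new java.util.concurrent.CopyOnWriteArrayList<java.io.File>();

  @Override
  public void reportErrorOnFile(java.io.File f) {
    System.err.println("Storage error reported on " + f.getAbsolutePath());
    failedFiles.add(f);
  }

  java.util.List<java.io.File> getFailedFiles() {
    return failedFiles;
  }
}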
1,463
35.6
78
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.common; import java.io.File; import java.io.FileOutputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.RandomAccessFile; import java.lang.management.ManagementFactory; import java.nio.channels.FileLock; import java.nio.channels.OverlappingFileLockException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Properties; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.io.nativeio.NativeIOException; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.VersionInfo; import com.google.common.base.Charsets; import com.google.common.base.Preconditions; /** * Storage information file. * <p> * Local storage information is stored in a separate file VERSION. * It contains type of the node, * the storage layout version, the namespace id, and * the fs state creation time. * <p> * Local storage can reside in multiple directories. * Each directory should contain the same VERSION file as the others. * During startup Hadoop servers (name-node and data-nodes) read their local * storage information from them. * <p> * The servers hold a lock for each storage directory while they run so that * other nodes were not able to startup sharing the same storage. * The locks are released when the servers stop (normally or abnormally). 
* */ @InterfaceAudience.Private public abstract class Storage extends StorageInfo { public static final Log LOG = LogFactory.getLog(Storage.class.getName()); // last layout version that did not support upgrades public static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3; // this corresponds to Hadoop-0.18 public static final int LAST_UPGRADABLE_LAYOUT_VERSION = -16; protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.18"; /** Layout versions of 0.20.203 release */ public static final int[] LAYOUT_VERSIONS_203 = {-19, -31}; public static final String STORAGE_FILE_LOCK = "in_use.lock"; public static final String STORAGE_DIR_CURRENT = "current"; public static final String STORAGE_DIR_PREVIOUS = "previous"; public static final String STORAGE_TMP_REMOVED = "removed.tmp"; public static final String STORAGE_TMP_PREVIOUS = "previous.tmp"; public static final String STORAGE_TMP_FINALIZED = "finalized.tmp"; public static final String STORAGE_TMP_LAST_CKPT = "lastcheckpoint.tmp"; public static final String STORAGE_PREVIOUS_CKPT = "previous.checkpoint"; /** * The blocksBeingWritten directory which was used in some 1.x and earlier * releases. */ public static final String STORAGE_1_BBW = "blocksBeingWritten"; public enum StorageState { NON_EXISTENT, NOT_FORMATTED, COMPLETE_UPGRADE, RECOVER_UPGRADE, COMPLETE_FINALIZE, COMPLETE_ROLLBACK, RECOVER_ROLLBACK, COMPLETE_CHECKPOINT, RECOVER_CHECKPOINT, NORMAL; } /** * An interface to denote storage directory type * Implementations can define a type for storage directory by implementing * this interface. */ @InterfaceAudience.Private public interface StorageDirType { public StorageDirType getStorageDirType(); public boolean isOfType(StorageDirType type); } protected List<StorageDirectory> storageDirs = new ArrayList<StorageDirectory>(); private class DirIterator implements Iterator<StorageDirectory> { final StorageDirType dirType; final boolean includeShared; int prevIndex; // for remove() int nextIndex; // for next() DirIterator(StorageDirType dirType, boolean includeShared) { this.dirType = dirType; this.nextIndex = 0; this.prevIndex = 0; this.includeShared = includeShared; } @Override public boolean hasNext() { if (storageDirs.isEmpty() || nextIndex >= storageDirs.size()) return false; if (dirType != null || !includeShared) { while (nextIndex < storageDirs.size()) { if (shouldReturnNextDir()) break; nextIndex++; } if (nextIndex >= storageDirs.size()) return false; } return true; } @Override public StorageDirectory next() { StorageDirectory sd = getStorageDir(nextIndex); prevIndex = nextIndex; nextIndex++; if (dirType != null || !includeShared) { while (nextIndex < storageDirs.size()) { if (shouldReturnNextDir()) break; nextIndex++; } } return sd; } @Override public void remove() { nextIndex = prevIndex; // restore previous state storageDirs.remove(prevIndex); // remove last returned element hasNext(); // reset nextIndex to correct place } private boolean shouldReturnNextDir() { StorageDirectory sd = getStorageDir(nextIndex); return (dirType == null || sd.getStorageDirType().isOfType(dirType)) && (includeShared || !sd.isShared()); } } /** * @return A list of the given File in every available storage directory, * regardless of whether it might exist. */ public List<File> getFiles(StorageDirType dirType, String fileName) { ArrayList<File> list = new ArrayList<File>(); Iterator<StorageDirectory> it = (dirType == null) ? 
dirIterator() : dirIterator(dirType); for ( ;it.hasNext(); ) { list.add(new File(it.next().getCurrentDir(), fileName)); } return list; } /** * Return default iterator * This iterator returns all entries in storageDirs */ public Iterator<StorageDirectory> dirIterator() { return dirIterator(null); } /** * Return iterator based on Storage Directory Type * This iterator selects entries in storageDirs of type dirType and returns * them via the Iterator */ public Iterator<StorageDirectory> dirIterator(StorageDirType dirType) { return dirIterator(dirType, true); } /** * Return all entries in storageDirs, potentially excluding shared dirs. * @param includeShared whether or not to include shared dirs. * @return an iterator over the configured storage dirs. */ public Iterator<StorageDirectory> dirIterator(boolean includeShared) { return dirIterator(null, includeShared); } /** * @param dirType all entries will be of this type of dir * @param includeShared true to include any shared directories, * false otherwise * @return an iterator over the configured storage dirs. */ public Iterator<StorageDirectory> dirIterator(StorageDirType dirType, boolean includeShared) { return new DirIterator(dirType, includeShared); } public Iterable<StorageDirectory> dirIterable(final StorageDirType dirType) { return new Iterable<StorageDirectory>() { @Override public Iterator<StorageDirectory> iterator() { return dirIterator(dirType); } }; } /** * generate storage list (debug line) */ public String listStorageDirectories() { StringBuilder buf = new StringBuilder(); for (StorageDirectory sd : storageDirs) { buf.append(sd.getRoot() + "(" + sd.getStorageDirType() + ");"); } return buf.toString(); } /** * One of the storage directories. */ @InterfaceAudience.Private public static class StorageDirectory implements FormatConfirmable { final File root; // root directory // whether or not this dir is shared between two separate NNs for HA, or // between multiple block pools in the case of federation. final boolean isShared; final StorageDirType dirType; // storage dir type FileLock lock; // storage lock private String storageUuid = null; // Storage directory identifier. public StorageDirectory(File dir) { // default dirType is null this(dir, null, false); } public StorageDirectory(File dir, StorageDirType dirType) { this(dir, dirType, false); } public void setStorageUuid(String storageUuid) { this.storageUuid = storageUuid; } public String getStorageUuid() { return storageUuid; } /** * Constructor * @param dir directory corresponding to the storage * @param dirType storage directory type * @param isShared whether or not this dir is shared between two NNs. true * disables locking on the storage directory, false enables locking */ public StorageDirectory(File dir, StorageDirType dirType, boolean isShared) { this.root = dir; this.lock = null; this.dirType = dirType; this.isShared = isShared; } /** * Get root directory of this storage */ public File getRoot() { return root; } /** * Get storage directory type */ public StorageDirType getStorageDirType() { return dirType; } public void read(File from, Storage storage) throws IOException { Properties props = readPropertiesFile(from); storage.setFieldsFromProperties(props, this); } /** * Clear and re-create storage directory. * <p> * Removes contents of the current directory and creates an empty directory. * * This does not fully format storage directory. * It cannot write the version file since it should be written last after * all other storage type dependent files are written. 
* Derived storage is responsible for setting specific storage values and * writing the version file to disk. * * @throws IOException */ public void clearDirectory() throws IOException { File curDir = this.getCurrentDir(); if (curDir.exists()) if (!(FileUtil.fullyDelete(curDir))) throw new IOException("Cannot remove current directory: " + curDir); if (!curDir.mkdirs()) throw new IOException("Cannot create directory " + curDir); } /** * Directory {@code current} contains latest files defining * the file system meta-data. * * @return the directory path */ public File getCurrentDir() { return new File(root, STORAGE_DIR_CURRENT); } /** * File {@code VERSION} contains the following fields: * <ol> * <li>node type</li> * <li>layout version</li> * <li>namespaceID</li> * <li>fs state creation time</li> * <li>other fields specific for this node type</li> * </ol> * The version file is always written last during storage directory updates. * The existence of the version file indicates that all other files have * been successfully written in the storage directory, the storage is valid * and does not need to be recovered. * * @return the version file path */ public File getVersionFile() { return new File(new File(root, STORAGE_DIR_CURRENT), STORAGE_FILE_VERSION); } /** * File {@code VERSION} from the {@code previous} directory. * * @return the previous version file path */ public File getPreviousVersionFile() { return new File(new File(root, STORAGE_DIR_PREVIOUS), STORAGE_FILE_VERSION); } /** * Directory {@code previous} contains the previous file system state, * which the system can be rolled back to. * * @return the directory path */ public File getPreviousDir() { return new File(root, STORAGE_DIR_PREVIOUS); } /** * {@code previous.tmp} is a transient directory, which holds * current file system state while the new state is saved into the new * {@code current} during upgrade. * If the saving succeeds {@code previous.tmp} will be moved to * {@code previous}, otherwise it will be renamed back to * {@code current} by the recovery procedure during startup. * * @return the directory path */ public File getPreviousTmp() { return new File(root, STORAGE_TMP_PREVIOUS); } /** * {@code removed.tmp} is a transient directory, which holds * current file system state while the previous state is moved into * {@code current} during rollback. * If the moving succeeds {@code removed.tmp} will be removed, * otherwise it will be renamed back to * {@code current} by the recovery procedure during startup. * * @return the directory path */ public File getRemovedTmp() { return new File(root, STORAGE_TMP_REMOVED); } /** * {@code finalized.tmp} is a transient directory, which holds * the {@code previous} file system state while it is being removed * in response to the finalize request. * Finalize operation will remove {@code finalized.tmp} when completed, * otherwise the removal will resume upon the system startup. * * @return the directory path */ public File getFinalizedTmp() { return new File(root, STORAGE_TMP_FINALIZED); } /** * {@code lastcheckpoint.tmp} is a transient directory, which holds * current file system state while the new state is saved into the new * {@code current} during regular namespace updates. * If the saving succeeds {@code lastcheckpoint.tmp} will be moved to * {@code previous.checkpoint}, otherwise it will be renamed back to * {@code current} by the recovery procedure during startup. 
   *
   * @return the directory path
   */
  public File getLastCheckpointTmp() {
    return new File(root, STORAGE_TMP_LAST_CKPT);
  }

  /**
   * {@code previous.checkpoint} is a directory, which holds the previous
   * (before the last save) state of the storage directory.
   * The directory is created as a reference only, it does not play a role
   * in state recovery procedures, and is recycled automatically,
   * but it may be useful for manual recovery of a stale state of the system.
   *
   * @return the directory path
   */
  public File getPreviousCheckpoint() {
    return new File(root, STORAGE_PREVIOUS_CKPT);
  }

  /**
   * Check consistency of the storage directory.
   *
   * @param startOpt a startup option.
   *
   * @return state {@link StorageState} of the storage directory
   * @throws InconsistentFSStateException if directory state is not
   * consistent and cannot be recovered.
   * @throws IOException
   */
  public StorageState analyzeStorage(StartupOption startOpt, Storage storage)
      throws IOException {
    assert root != null : "root is null";
    boolean hadMkdirs = false;
    String rootPath = root.getCanonicalPath();
    try { // check that storage exists
      if (!root.exists()) {
        // storage directory does not exist
        if (startOpt != StartupOption.FORMAT &&
            startOpt != StartupOption.HOTSWAP) {
          LOG.warn("Storage directory " + rootPath + " does not exist");
          return StorageState.NON_EXISTENT;
        }
        LOG.info(rootPath + " does not exist. Creating ...");
        if (!root.mkdirs())
          throw new IOException("Cannot create directory " + rootPath);
        hadMkdirs = true;
      }
      // or is inaccessible
      if (!root.isDirectory()) {
        LOG.warn(rootPath + " is not a directory");
        return StorageState.NON_EXISTENT;
      }
      if (!FileUtil.canWrite(root)) {
        LOG.warn("Cannot access storage directory " + rootPath);
        return StorageState.NON_EXISTENT;
      }
    } catch(SecurityException ex) {
      LOG.warn("Cannot access storage directory " + rootPath, ex);
      return StorageState.NON_EXISTENT;
    }

    this.lock(); // lock storage if it exists

    // If startOpt is HOTSWAP, it returns NOT_FORMATTED for empty directory,
    // while it also checks the layout version.
    if (startOpt == HdfsServerConstants.StartupOption.FORMAT ||
        (startOpt == StartupOption.HOTSWAP && hadMkdirs))
      return StorageState.NOT_FORMATTED;

    if (startOpt != HdfsServerConstants.StartupOption.IMPORT) {
      storage.checkOldLayoutStorage(this);
    }

    // check whether current directory is valid
    File versionFile = getVersionFile();
    boolean hasCurrent = versionFile.exists();

    // check which directories exist
    boolean hasPrevious = getPreviousDir().exists();
    boolean hasPreviousTmp = getPreviousTmp().exists();
    boolean hasRemovedTmp = getRemovedTmp().exists();
    boolean hasFinalizedTmp = getFinalizedTmp().exists();
    boolean hasCheckpointTmp = getLastCheckpointTmp().exists();

    if (!(hasPreviousTmp || hasRemovedTmp
        || hasFinalizedTmp || hasCheckpointTmp)) {
      // no temp dirs - no recovery
      if (hasCurrent)
        return StorageState.NORMAL;
      if (hasPrevious)
        throw new InconsistentFSStateException(root,
            "version file in current directory is missing.");
      return StorageState.NOT_FORMATTED;
    }

    if ((hasPreviousTmp?1:0) + (hasRemovedTmp?1:0)
        + (hasFinalizedTmp?1:0) + (hasCheckpointTmp?1:0) > 1)
      // more than one temp dirs
      throw new InconsistentFSStateException(root,
          "too many temporary directories.");

    // # of temp dirs == 1 should either recover or complete a transition
    if (hasCheckpointTmp) {
      return hasCurrent ?
          StorageState.COMPLETE_CHECKPOINT : StorageState.RECOVER_CHECKPOINT;
    }

    if (hasFinalizedTmp) {
      if (hasPrevious)
        throw new InconsistentFSStateException(root,
            STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_FINALIZED
            + " cannot exist together.");
      return StorageState.COMPLETE_FINALIZE;
    }

    if (hasPreviousTmp) {
      if (hasPrevious)
        throw new InconsistentFSStateException(root,
            STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_PREVIOUS
            + " cannot exist together.");
      if (hasCurrent)
        return StorageState.COMPLETE_UPGRADE;
      return StorageState.RECOVER_UPGRADE;
    }

    assert hasRemovedTmp : "hasRemovedTmp must be true";
    if (!(hasCurrent ^ hasPrevious))
      throw new InconsistentFSStateException(root,
          "one and only one directory " + STORAGE_DIR_CURRENT
          + " or " + STORAGE_DIR_PREVIOUS
          + " must be present when " + STORAGE_TMP_REMOVED + " exists.");
    if (hasCurrent)
      return StorageState.COMPLETE_ROLLBACK;
    return StorageState.RECOVER_ROLLBACK;
  }

  /**
   * Complete or recover storage state from previously failed transition.
   *
   * @param curState specifies what/how the state should be recovered
   * @throws IOException
   */
  public void doRecover(StorageState curState) throws IOException {
    File curDir = getCurrentDir();
    String rootPath = root.getCanonicalPath();
    switch(curState) {
    case COMPLETE_UPGRADE:  // mv previous.tmp -> previous
      LOG.info("Completing previous upgrade for storage directory "
          + rootPath);
      rename(getPreviousTmp(), getPreviousDir());
      return;
    case RECOVER_UPGRADE:   // mv previous.tmp -> current
      LOG.info("Recovering storage directory " + rootPath
          + " from previous upgrade");
      if (curDir.exists())
        deleteDir(curDir);
      rename(getPreviousTmp(), curDir);
      return;
    case COMPLETE_ROLLBACK: // rm removed.tmp
      LOG.info("Completing previous rollback for storage directory "
          + rootPath);
      deleteDir(getRemovedTmp());
      return;
    case RECOVER_ROLLBACK:  // mv removed.tmp -> current
      LOG.info("Recovering storage directory " + rootPath
          + " from previous rollback");
      rename(getRemovedTmp(), curDir);
      return;
    case COMPLETE_FINALIZE: // rm finalized.tmp
      LOG.info("Completing previous finalize for storage directory "
          + rootPath);
      deleteDir(getFinalizedTmp());
      return;
    case COMPLETE_CHECKPOINT: // mv lastcheckpoint.tmp -> previous.checkpoint
      LOG.info("Completing previous checkpoint for storage directory "
          + rootPath);
      File prevCkptDir = getPreviousCheckpoint();
      if (prevCkptDir.exists())
        deleteDir(prevCkptDir);
      rename(getLastCheckpointTmp(), prevCkptDir);
      return;
    case RECOVER_CHECKPOINT:  // mv lastcheckpoint.tmp -> current
      LOG.info("Recovering storage directory " + rootPath
          + " from failed checkpoint");
      if (curDir.exists())
        deleteDir(curDir);
      rename(getLastCheckpointTmp(), curDir);
      return;
    default:
      throw new IOException("Unexpected FS state: " + curState);
    }
  }

  /**
   * @return true if the storage directory should prompt the user prior
   * to formatting (i.e. if the directory appears to contain some data)
   * @throws IOException if the SD cannot be accessed due to an IO error
   */
  @Override
  public boolean hasSomeData() throws IOException {
    // It's alright for a dir not to exist, or to exist (properly accessible)
    // and be completely empty.
    if (!root.exists()) return false;

    if (!root.isDirectory()) {
      // a file where you expect a directory should not cause silent
      // formatting
      return true;
    }

    if (FileUtil.listFiles(root).length == 0) {
      // Empty dir can format without prompt.
      return false;
    }

    return true;
  }

  public boolean isShared() {
    return isShared;
  }

  /**
   * Lock storage to provide exclusive access.
   *
   * <p> Locking is not supported by all file systems.
* E.g., NFS does not consistently support exclusive locks. * * <p> If locking is supported we guarantee exclusive access to the * storage directory. Otherwise, no guarantee is given. * * @throws IOException if locking fails */ public void lock() throws IOException { if (isShared()) { LOG.info("Locking is disabled for " + this.root); return; } FileLock newLock = tryLock(); if (newLock == null) { String msg = "Cannot lock storage " + this.root + ". The directory is already locked"; LOG.info(msg); throw new IOException(msg); } // Don't overwrite lock until success - this way if we accidentally // call lock twice, the internal state won't be cleared by the second // (failed) lock attempt lock = newLock; } /** * Attempts to acquire an exclusive lock on the storage. * * @return A lock object representing the newly-acquired lock or * <code>null</code> if storage is already locked. * @throws IOException if locking fails. */ @SuppressWarnings("resource") FileLock tryLock() throws IOException { boolean deletionHookAdded = false; File lockF = new File(root, STORAGE_FILE_LOCK); if (!lockF.exists()) { lockF.deleteOnExit(); deletionHookAdded = true; } RandomAccessFile file = new RandomAccessFile(lockF, "rws"); String jvmName = ManagementFactory.getRuntimeMXBean().getName(); FileLock res = null; try { res = file.getChannel().tryLock(); if (null == res) { LOG.error("Unable to acquire file lock on path " + lockF.toString()); throw new OverlappingFileLockException(); } file.write(jvmName.getBytes(Charsets.UTF_8)); LOG.info("Lock on " + lockF + " acquired by nodename " + jvmName); } catch(OverlappingFileLockException oe) { // Cannot read from the locked file on Windows. String lockingJvmName = Path.WINDOWS ? "" : (" " + file.readLine()); LOG.error("It appears that another node " + lockingJvmName + " has already locked the storage directory: " + root, oe); file.close(); return null; } catch(IOException e) { LOG.error("Failed to acquire lock on " + lockF + ". If this storage directory is mounted via NFS, " + "ensure that the appropriate nfs lock services are running.", e); file.close(); throw e; } if (!deletionHookAdded) { // If the file existed prior to our startup, we didn't // call deleteOnExit above. But since we successfully locked // the dir, we can take care of cleaning it up. lockF.deleteOnExit(); } return res; } /** * Unlock storage. * * @throws IOException */ public void unlock() throws IOException { if (this.lock == null) return; this.lock.release(); lock.channel().close(); lock = null; } @Override public String toString() { return "Storage Directory " + this.root; } /** * Check whether underlying file system supports file locking. * * @return <code>true</code> if exclusive locks are supported or * <code>false</code> otherwise. 
* @throws IOException * @see StorageDirectory#lock() */ public boolean isLockSupported() throws IOException { FileLock firstLock = null; FileLock secondLock = null; try { firstLock = lock; if(firstLock == null) { firstLock = tryLock(); if(firstLock == null) return true; } secondLock = tryLock(); if(secondLock == null) return true; } finally { if(firstLock != null && firstLock != lock) { firstLock.release(); firstLock.channel().close(); } if(secondLock != null) { secondLock.release(); secondLock.channel().close(); } } return false; } } /** * Create empty storage info of the specified type */ protected Storage(NodeType type) { super(type); } protected Storage(StorageInfo storageInfo) { super(storageInfo); } public int getNumStorageDirs() { return storageDirs.size(); } public StorageDirectory getStorageDir(int idx) { return storageDirs.get(idx); } /** * @return the storage directory, with the precondition that this storage * has exactly one storage directory */ public StorageDirectory getSingularStorageDir() { Preconditions.checkState(storageDirs.size() == 1); return storageDirs.get(0); } protected void addStorageDir(StorageDirectory sd) { storageDirs.add(sd); } /** * Returns true if the storage directory on the given directory is already * loaded. * @param root the root directory of a {@link StorageDirectory} * @throws IOException if failed to get canonical path. */ protected boolean containsStorageDir(File root) throws IOException { for (StorageDirectory sd : storageDirs) { if (sd.getRoot().getCanonicalPath().equals(root.getCanonicalPath())) { return true; } } return false; } /** * Return true if the layout of the given storage directory is from a version * of Hadoop prior to the introduction of the "current" and "previous" * directories which allow upgrade and rollback. */ public abstract boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException; /** * Check if the given storage directory comes from a version of Hadoop * prior to when the directory layout changed (ie 0.13). If this is * the case, this method throws an IOException. */ private void checkOldLayoutStorage(StorageDirectory sd) throws IOException { if (isPreUpgradableLayout(sd)) { checkVersionUpgradable(0); } } /** * Checks if the upgrade from {@code oldVersion} is supported. * @param oldVersion the version of the metadata to check with the current * version * @throws IOException if upgrade is not supported */ public static void checkVersionUpgradable(int oldVersion) throws IOException { if (oldVersion > LAST_UPGRADABLE_LAYOUT_VERSION) { String msg = "*********** Upgrade is not supported from this " + " older version " + oldVersion + " of storage to the current version." + " Please upgrade to " + LAST_UPGRADABLE_HADOOP_VERSION + " or a later version and then upgrade to current" + " version. Old layout version is " + (oldVersion == 0 ? "'too old'" : (""+oldVersion)) + " and latest layout version this software version can" + " upgrade from is " + LAST_UPGRADABLE_LAYOUT_VERSION + ". ************"; LOG.error(msg); throw new IOException(msg); } } /** * Iterate over each of the {@link FormatConfirmable} objects, * potentially checking with the user whether it should be formatted. * * If running in interactive mode, will prompt the user for each * directory to allow them to format anyway. Otherwise, returns * false, unless 'force' is specified. 
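   * <p>
   * A minimal calling sketch (the {@code dirs}, {@code force} and
   * {@code interactive} values are assumed to come from the caller; they are
   * not defined by this class):
   * <pre>{@code
   *   Iterable<StorageDirectory> dirs = ...;   // directories about to be formatted
   *   if (!Storage.confirmFormat(dirs, force, interactive)) {
   *     return; // formatting was declined for at least one directory
   *   }
   * }</pre>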
* * @param force format regardless of whether dirs exist * @param interactive prompt the user when a dir exists * @return true if formatting should proceed * @throws IOException if some storage cannot be accessed */ public static boolean confirmFormat( Iterable<? extends FormatConfirmable> items, boolean force, boolean interactive) throws IOException { for (FormatConfirmable item : items) { if (!item.hasSomeData()) continue; if (force) { // Don't confirm, always format. System.err.println( "Data exists in " + item + ". Formatting anyway."); continue; } if (!interactive) { // Don't ask - always don't format System.err.println( "Running in non-interactive mode, and data appears to exist in " + item + ". Not formatting."); return false; } if (!ToolRunner.confirmPrompt("Re-format filesystem in " + item + " ?")) { System.err.println("Format aborted in " + item); return false; } } return true; } /** * Interface for classes which need to have the user confirm their * formatting during NameNode -format and other similar operations. * * This is currently a storage directory or journal manager. */ @InterfaceAudience.Private public interface FormatConfirmable { /** * @return true if the storage seems to have some valid data in it, * and the user should be required to confirm the format. Otherwise, * false. * @throws IOException if the storage cannot be accessed at all. */ public boolean hasSomeData() throws IOException; /** * @return a string representation of the formattable item, suitable * for display to the user inside a prompt */ public String toString(); } /** * Set common storage fields into the given properties object. * Should be overloaded if additional fields need to be set. * * @param props the Properties object to write into */ protected void setPropertiesFromFields(Properties props, StorageDirectory sd) throws IOException { props.setProperty("layoutVersion", String.valueOf(layoutVersion)); props.setProperty("storageType", storageType.toString()); props.setProperty("namespaceID", String.valueOf(namespaceID)); // Set clusterID in version with federation support if (versionSupportsFederation(getServiceLayoutFeatureMap())) { props.setProperty("clusterID", clusterID); } props.setProperty("cTime", String.valueOf(cTime)); } /** * Write properties to the VERSION file in the given storage directory. */ public void writeProperties(StorageDirectory sd) throws IOException { writeProperties(sd.getVersionFile(), sd); } public void writeProperties(File to, StorageDirectory sd) throws IOException { Properties props = new Properties(); setPropertiesFromFields(props, sd); writeProperties(to, props); } public static void writeProperties(File to, Properties props) throws IOException { try (RandomAccessFile file = new RandomAccessFile(to, "rws"); FileOutputStream out = new FileOutputStream(file.getFD())) { file.seek(0); /* * If server is interrupted before this line, * the version file will remain unchanged. */ props.store(out, null); /* * Now the new fields are flushed to the head of the file, but file * length can still be larger then required and therefore the file can * contain whole or corrupted fields from its old contents in the end. * If server is interrupted here and restarted later these extra fields * either should not effect server behavior or should be handled * by the server correctly. 
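       * The file.setLength() call below truncates the file to the position
       * just written, which removes any such stale tail once the store
       * succeeds.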
*/ file.setLength(out.getChannel().position()); } } public static void rename(File from, File to) throws IOException { try { NativeIO.renameTo(from, to); } catch (NativeIOException e) { throw new IOException("Failed to rename " + from.getCanonicalPath() + " to " + to.getCanonicalPath() + " due to failure in native rename. " + e.toString()); } } /** * Copies a file (usually large) to a new location using native unbuffered IO. * <p> * This method copies the contents of the specified source file * to the specified destination file using OS specific unbuffered IO. * The goal is to avoid churning the file system buffer cache when copying * large files. * * We can't use FileUtils#copyFile from apache-commons-io because it * is a buffered IO based on FileChannel#transferFrom, which uses MmapByteBuffer * internally. * * The directory holding the destination file is created if it does not exist. * If the destination file exists, then this method will delete it first. * <p> * <strong>Note:</strong> Setting <code>preserveFileDate</code> to * {@code true} tries to preserve the file's last modified * date/times using {@link File#setLastModified(long)}, however it is * not guaranteed that the operation will succeed. * If the modification operation fails, no indication is provided. * * @param srcFile an existing file to copy, must not be {@code null} * @param destFile the new file, must not be {@code null} * @param preserveFileDate true if the file date of the copy * should be the same as the original * * @throws NullPointerException if source or destination is {@code null} * @throws IOException if source or destination is invalid * @throws IOException if an IO error occurs during copying */ public static void nativeCopyFileUnbuffered(File srcFile, File destFile, boolean preserveFileDate) throws IOException { if (srcFile == null) { throw new NullPointerException("Source must not be null"); } if (destFile == null) { throw new NullPointerException("Destination must not be null"); } if (srcFile.exists() == false) { throw new FileNotFoundException("Source '" + srcFile + "' does not exist"); } if (srcFile.isDirectory()) { throw new IOException("Source '" + srcFile + "' exists but is a directory"); } if (srcFile.getCanonicalPath().equals(destFile.getCanonicalPath())) { throw new IOException("Source '" + srcFile + "' and destination '" + destFile + "' are the same"); } File parentFile = destFile.getParentFile(); if (parentFile != null) { if (!parentFile.mkdirs() && !parentFile.isDirectory()) { throw new IOException("Destination '" + parentFile + "' directory cannot be created"); } } if (destFile.exists()) { if (FileUtil.canWrite(destFile) == false) { throw new IOException("Destination '" + destFile + "' exists but is read-only"); } else { if (destFile.delete() == false) { throw new IOException("Destination '" + destFile + "' exists but cannot be deleted"); } } } try { NativeIO.copyFileUnbuffered(srcFile, destFile); } catch (NativeIOException e) { throw new IOException("Failed to copy " + srcFile.getCanonicalPath() + " to " + destFile.getCanonicalPath() + " due to failure in NativeIO#copyFileUnbuffered(). 
" + e.toString()); } if (srcFile.length() != destFile.length()) { throw new IOException("Failed to copy full contents from '" + srcFile + "' to '" + destFile + "'"); } if (preserveFileDate) { if (destFile.setLastModified(srcFile.lastModified()) == false) { if (LOG.isDebugEnabled()) { LOG.debug("Failed to preserve last modified date from'" + srcFile + "' to '" + destFile + "'"); } } } } /** * Recursively delete all the content of the directory first and then * the directory itself from the local filesystem. * @param dir The directory to delete * @throws IOException */ public static void deleteDir(File dir) throws IOException { if (!FileUtil.fullyDelete(dir)) throw new IOException("Failed to delete " + dir.getCanonicalPath()); } /** * Write all data storage files. * @throws IOException */ public void writeAll() throws IOException { this.layoutVersion = getServiceLayoutVersion(); for (Iterator<StorageDirectory> it = storageDirs.iterator(); it.hasNext();) { writeProperties(it.next()); } } /** * Unlock all storage directories. * @throws IOException */ public void unlockAll() throws IOException { for (Iterator<StorageDirectory> it = storageDirs.iterator(); it.hasNext();) { it.next().unlock(); } } public static String getBuildVersion() { return VersionInfo.getRevision(); } public static String getRegistrationID(StorageInfo storage) { return "NS-" + Integer.toString(storage.getNamespaceID()) + "-" + storage.getClusterID() + "-" + Long.toString(storage.getCTime()); } public static boolean is203LayoutVersion(int layoutVersion) { for (int lv203 : LAYOUT_VERSIONS_203) { if (lv203 == layoutVersion) { return true; } } return false; } }
40,286
34.062663
103
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/GenerationStamp.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.common; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.util.SequentialNumber; /**************************************************************** * A GenerationStamp is a Hadoop FS primitive, identified by a long. ****************************************************************/ @InterfaceAudience.Private public class GenerationStamp extends SequentialNumber { /** * The last reserved generation stamp. */ public static final long LAST_RESERVED_STAMP = 1000L; /** * Create a new instance, initialized to {@link #LAST_RESERVED_STAMP}. */ public GenerationStamp() { super(LAST_RESERVED_STAMP); } }
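// A hedged usage sketch (not part of the upstream file): GenerationStamp is
// assumed to inherit its counter operations from SequentialNumber, so callers
// would typically use those inherited methods, for example:
//
//   GenerationStamp gs = new GenerationStamp();  // starts at LAST_RESERVED_STAMP
//   long next = gs.nextValue();                  // 1001, then 1002, ...
//   long current = gs.getCurrentValue();         // latest stamp handed out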
1,509
36.75
75
java