column           type           range
repo             stringlengths  1 - 191
file             stringlengths  23 - 351
code             stringlengths  0 - 5.32M
file_length      int64          0 - 5.32M
avg_line_length  float64        0 - 2.9k
max_line_length  int64          0 - 288k
extension_type   stringclasses  1 value
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Bellard.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.examples.pi.math; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.TreeMap; import java.util.NoSuchElementException; import org.apache.hadoop.examples.pi.Container; import org.apache.hadoop.examples.pi.Util; /** * Bellard's BBP-type Pi formula * 1/2^6 \sum_{n=0}^\infty (-1)^n/2^{10n} * (-2^5/(4n+1) -1/(4n+3) +2^8/(10n+1) -2^6/(10n+3) -2^2/(10n+5) * -2^2/(10n+7) +1/(10n+9)) * * References: * * [1] David H. Bailey, Peter B. Borwein and Simon Plouffe. On the Rapid * Computation of Various Polylogarithmic Constants. * Math. Comp., 66:903-913, 1996. * * [2] Fabrice Bellard. A new formula to compute the n'th binary digit of pi, * 1997. Available at http://fabrice.bellard.free.fr/pi . */ public final class Bellard { /** Parameters for the sums */ public enum Parameter { // \sum_{k=0}^\infty (-1)^{k+1}( 2^{d-10k-1}/(4k+1) + 2^{d-10k-6}/(4k+3) ) P8_1(false, 1, 8, -1), P8_3(false, 3, 8, -6), P8_5(P8_1), P8_7(P8_3), /* * 2^d\sum_{k=0}^\infty (-1)^k( 2^{ 2-10k} / (10k + 1) * -2^{ -10k} / (10k + 3) * -2^{-4-10k} / (10k + 5) * -2^{-4-10k} / (10k + 7) * +2^{-6-10k} / (10k + 9) ) */ P20_21(true , 1, 20, 2), P20_3(false, 3, 20, 0), P20_5(false, 5, 20, -4), P20_7(false, 7, 20, -4), P20_9(true , 9, 20, -6), P20_11(P20_21), P20_13(P20_3), P20_15(P20_5), P20_17(P20_7), P20_19(P20_9); final boolean isplus; final long j; final int deltaN; final int deltaE; final int offsetE; private Parameter(boolean isplus, long j, int deltaN, int offsetE) { this.isplus = isplus; this.j = j; this.deltaN = deltaN; this.deltaE = -20; this.offsetE = offsetE; } private Parameter(Parameter p) { this.isplus = !p.isplus; this.j = p.j + (p.deltaN >> 1); this.deltaN = p.deltaN; this.deltaE = p.deltaE; this.offsetE = p.offsetE + (p.deltaE >> 1); } /** Get the Parameter represented by the String */ public static Parameter get(String s) { s = s.trim(); if (s.charAt(0) == 'P') s = s.substring(1); final String[] parts = s.split("\\D+"); if (parts.length >= 2) { final String name = "P" + parts[0] + "_" + parts[1]; for(Parameter p : values()) if (p.name().equals(name)) return p; } throw new IllegalArgumentException("s=" + s + ", parts=" + Arrays.asList(parts)); } } /** The sums in the Bellard's formula */ public static class Sum implements Container<Summation>, Iterable<Summation> { private static final long ACCURACY_BIT = 50; private final Parameter parameter; private final Summation sigma; private final Summation[] parts; private final Tail tail; /** Constructor */ private <T extends Container<Summation>> Sum(long b, Parameter p, int nParts, List<T> existing) { if (b < 0) throw new 
IllegalArgumentException("b = " + b + " < 0"); if (nParts < 1) throw new IllegalArgumentException("nParts = " + nParts + " < 1"); final long i = p.j == 1 && p.offsetE >= 0? 1 : 0; final long e = b + i*p.deltaE + p.offsetE; final long n = i*p.deltaN + p.j; this.parameter = p; this.sigma = new Summation(n, p.deltaN, e, p.deltaE, 0); this.parts = partition(sigma, nParts, existing); this.tail = new Tail(n, e); } private static <T extends Container<Summation>> Summation[] partition( Summation sigma, int nParts, List<T> existing) { final List<Summation> parts = new ArrayList<Summation>(); if (existing == null || existing.isEmpty()) parts.addAll(Arrays.asList(sigma.partition(nParts))); else { final long stepsPerPart = sigma.getSteps()/nParts; final List<Summation> remaining = sigma.remainingTerms(existing); for(Summation s : remaining) { final int n = (int)((s.getSteps() - 1)/stepsPerPart) + 1; parts.addAll(Arrays.asList(s.partition(n))); } for(Container<Summation> c : existing) parts.add(c.getElement()); Collections.sort(parts); } return parts.toArray(new Summation[parts.size()]); } /** {@inheritDoc} */ @Override public String toString() { int n = 0; for(Summation s : parts) if (s.getValue() == null) n++; return getClass().getSimpleName() + "{" + parameter + ": " + sigma + ", remaining=" + n + "}"; } /** Set the value of sigma */ public void setValue(Summation s) { if (s.getValue() == null) throw new IllegalArgumentException("s.getValue()" + "\n sigma=" + sigma + "\n s =" + s); if (!s.contains(sigma) || !sigma.contains(s)) throw new IllegalArgumentException("!s.contains(sigma) || !sigma.contains(s)" + "\n sigma=" + sigma + "\n s =" + s); sigma.setValue(s.getValue()); } /** get the value of sigma */ public double getValue() { if (sigma.getValue() == null) { double d = 0; for(int i = 0; i < parts.length; i++) d = Modular.addMod(d, parts[i].compute()); sigma.setValue(d); } final double s = Modular.addMod(sigma.getValue(), tail.compute()); return parameter.isplus? s: -s; } /** {@inheritDoc} */ @Override public Summation getElement() { if (sigma.getValue() == null) { int i = 0; double d = 0; for(; i < parts.length && parts[i].getValue() != null; i++) d = Modular.addMod(d, parts[i].getValue()); if (i == parts.length) sigma.setValue(d); } return sigma; } /** The sum tail */ private class Tail { private long n; private long e; private Tail(long n, long e) { this.n = n; this.e = e; } private double compute() { if (e > 0) { final long edelta = -sigma.E.delta; long q = e / edelta; long r = e % edelta; if (r == 0) { e = 0; n += q * sigma.N.delta; } else { e = edelta - r; n += (q + 1)*sigma.N.delta; } } else if (e < 0) e = -e; double s = 0; for(;; e -= sigma.E.delta) { if (e > ACCURACY_BIT || (1L << (ACCURACY_BIT - e)) < n) return s; s += 1.0 / (n << e); if (s >= 1) s--; n += sigma.N.delta; } } } /** {@inheritDoc} */ @Override public Iterator<Summation> iterator() { return new Iterator<Summation>() { private int i = 0; /** {@inheritDoc} */ @Override public boolean hasNext() {return i < parts.length;} /** {@inheritDoc} */ @Override public Summation next() throws NoSuchElementException { if (hasNext()) { return parts[i++]; } else { throw new NoSuchElementException("Sum's iterator does not have next!"); } } /** Unsupported */ @Override public void remove() {throw new UnsupportedOperationException();} }; } } /** Get the sums for the Bellard formula. 
*/ public static <T extends Container<Summation>> Map<Parameter, Sum> getSums( long b, int partsPerSum, Map<Parameter, List<T>> existing) { final Map<Parameter, Sum> sums = new TreeMap<Parameter, Sum>(); for(Parameter p : Parameter.values()) { final Sum s = new Sum(b, p, partsPerSum, existing.get(p)); Util.out.println("put " + s); sums.put(p, s); } return sums; } /** Compute bits of Pi from the results. */ public static <T extends Container<Summation>> double computePi( final long b, Map<Parameter, T> results) { if (results.size() != Parameter.values().length) throw new IllegalArgumentException("m.size() != Parameter.values().length" + ", m.size()=" + results.size() + "\n m=" + results); double pi = 0; for(Parameter p : Parameter.values()) { final Summation sigma = results.get(p).getElement(); final Sum s = new Sum(b, p, 1, null); s.setValue(sigma); pi = Modular.addMod(pi, s.getValue()); } return pi; } /** Compute bits of Pi in the local machine. */ public static double computePi(final long b) { double pi = 0; for(Parameter p : Parameter.values()) pi = Modular.addMod(pi, new Sum(b, p, 1, null).getValue()); return pi; } /** Estimate the number of terms. */ public static long bit2terms(long b) { return 7*(b/10); } private static void computePi(Util.Timer t, long b) { t.tick(Util.pi2string(computePi(b), bit2terms(b))); } /** main */ public static void main(String[] args) throws IOException { final Util.Timer t = new Util.Timer(false); computePi(t, 0); computePi(t, 1); computePi(t, 2); computePi(t, 3); computePi(t, 4); Util.printBitSkipped(1008); computePi(t, 1008); computePi(t, 1012); long b = 10; for(int i = 0; i < 7; i++) { Util.printBitSkipped(b); computePi(t, b - 4); computePi(t, b); computePi(t, b + 4); b *= 10; } } }
10,517
29.575581
101
java
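For readability, the BBP-type formula quoted in the Javadoc of Bellard.java above, rewritten in LaTeX (same terms, nothing added); bit2terms(b) = 7*(b/10) in the same file estimates how many terms are needed to reach bit position b.

```latex
\pi = \frac{1}{2^6} \sum_{n=0}^{\infty} \frac{(-1)^n}{2^{10n}}
      \left( -\frac{2^5}{4n+1} - \frac{1}{4n+3} + \frac{2^8}{10n+1} - \frac{2^6}{10n+3}
             - \frac{2^2}{10n+5} - \frac{2^2}{10n+7} + \frac{1}{10n+9} \right)
```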
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Modular.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.examples.pi.math; /** Modular arithmetics */ public class Modular { static final long MAX_SQRT_LONG = (long)Math.sqrt(Long.MAX_VALUE); /** Compute 2^e mod n */ public static long mod(long e, long n) { final int HALF = (63 - Long.numberOfLeadingZeros(n)) >> 1; final int FULL = HALF << 1; final long ONES = (1 << HALF) - 1; long r = 2; for (long mask = Long.highestOneBit(e) >> 1; mask > 0; mask >>= 1) { if (r <= MAX_SQRT_LONG) { r *= r; if (r >= n) r %= n; } else { // r^2 will overflow final long high = r >>> HALF; final long low = r &= ONES; r *= r; if (r >= n) r %= n; if (high != 0) { long s = high * high; if (s >= n) s %= n; for(int i = 0; i < FULL; i++) if ((s <<= 1) >= n) s -= n; if (low == 0) r = s; else { long t = high * low; if (t >= n) t %= n; for(int i = -1; i < HALF; i++) if ((t <<= 1) >= n) t -= n; r += s; if (r >= n) r -= n; r += t; if (r >= n) r -= n; } } } if ((e & mask) != 0) { r <<= 1; if (r >= n) r -= n; } } return r; } /** Given x in [0,1) and a in (-1,1), * return (x, a) mod 1.0. */ public static double addMod(double x, final double a) { x += a; return x >= 1? x - 1: x < 0? x + 1: x; } /** Given 0 &lt; x &lt; y, * return x^(-1) mod y. */ public static long modInverse(final long x, final long y) { if (x == 1) return 1; long a = 1; long b = 0; long c = x; long u = 0; long v = 1; long w = y; for(;;) { { final long q = w/c; w -= q*c; u -= q*a; if (w == 1) return u > 0? u: u + y; v -= q*b; } { final long q = c/w; c -= q*w; a -= q*u; if (c == 1) return a > 0? a: a + y; b -= q*v; } } } }
2,914
24.79646
75
java
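Modular.mod(e, n) above is a square-and-multiply computation of 2^e mod n that splits the intermediate square into 31-bit halves whenever r*r would overflow a signed long. Below is a minimal independent sketch of the same square-and-multiply idea, restricted to moduli below 2^31 so no splitting is needed, cross-checked against java.math.BigInteger.modPow; it is an illustration, not the Hadoop implementation.

```java
import java.math.BigInteger;

/** Minimal sketch: compute 2^e mod n by square-and-multiply and cross-check with BigInteger. */
public class PowerMod2Sketch {
  /** Returns 2^e mod n for e >= 0 and 2 <= n < 2^31 (small n, so products never overflow a long). */
  static long mod(long e, long n) {
    long r = 1 % n;                           // running result
    long base = 2 % n;                        // current power of two
    for (; e > 0; e >>= 1) {
      if ((e & 1) != 0) r = (r * base) % n;   // multiply in the current bit of e
      base = (base * base) % n;               // square for the next bit
    }
    return r;
  }

  public static void main(String[] args) {
    long n = 1000003;                         // arbitrary small modulus for the check
    for (long e = 0; e < 50; e++) {
      long expected = BigInteger.valueOf(2)
          .modPow(BigInteger.valueOf(e), BigInteger.valueOf(n)).longValue();
      if (mod(e, n) != expected) throw new AssertionError("mismatch at e=" + e);
    }
    System.out.println("square-and-multiply agrees with BigInteger.modPow");
  }
}
```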
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/ArithmeticProgression.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.examples.pi.math; import org.apache.hadoop.examples.pi.Util; /** An arithmetic progression */ public class ArithmeticProgression implements Comparable<ArithmeticProgression> { /** A symbol */ public final char symbol; /** Starting value */ public final long value; /** Difference between terms */ public final long delta; /** Ending value */ public final long limit; /** Constructor */ public ArithmeticProgression(char symbol, long value, long delta, long limit) { if (delta == 0) throw new IllegalArgumentException("delta == 0"); this.symbol = symbol; this.value = value; this.delta = delta; this.limit = limit; } /** {@inheritDoc} */ @Override public boolean equals(Object obj) { if (this == obj) return true; else if (obj != null && obj instanceof ArithmeticProgression) { final ArithmeticProgression that = (ArithmeticProgression)obj; if (this.symbol != that.symbol) throw new IllegalArgumentException("this.symbol != that.symbol, this=" + this + ", that=" + that); return this.value == that.value && this.delta == that.delta && this.limit == that.limit; } throw new IllegalArgumentException(obj == null? "obj == null": "obj.getClass()=" + obj.getClass()); } /** Not supported */ public int hashCode() { throw new UnsupportedOperationException(); } /** {@inheritDoc} */ @Override public int compareTo(ArithmeticProgression that) { if (this.symbol != that.symbol) throw new IllegalArgumentException("this.symbol != that.symbol, this=" + this + ", that=" + that); if (this.delta != that.delta) throw new IllegalArgumentException("this.delta != that.delta, this=" + this + ", that=" + that); final long d = this.limit - that.limit; return d > 0? 1: d == 0? 0: -1; } /** Does this contain that? */ boolean contains(ArithmeticProgression that) { if (this.symbol != that.symbol) throw new IllegalArgumentException("this.symbol != that.symbol, this=" + this + ", that=" + that); if (this.delta == that.delta) { if (this.value == that.value) return this.getSteps() >= that.getSteps(); else if (this.delta < 0) return this.value > that.value && this.limit <= that.limit; else if (this.delta > 0) return this.value < that.value && this.limit >= that.limit; } return false; } /** Skip some steps */ long skip(long steps) { if (steps < 0) throw new IllegalArgumentException("steps < 0, steps=" + steps); return value + steps*delta; } /** Get the number of steps */ public long getSteps() { return (limit - value)/delta; } /** {@inheritDoc} */ @Override public String toString() { return symbol + ":value=" + value + ",delta=" + delta + ",limit=" + limit; } /** Convert a String to an ArithmeticProgression. 
*/ static ArithmeticProgression valueOf(final String s) { int i = 2; int j = s.indexOf(",delta="); final long value = Util.parseLongVariable("value", s.substring(2, j)); i = j + 1; j = s.indexOf(",limit="); final long delta = Util.parseLongVariable("delta", s.substring(i, j)); i = j + 1; final long limit = Util.parseLongVariable("limit", s.substring(i)); return new ArithmeticProgression(s.charAt(0), value, delta, limit); } }
4,250
32.738095
81
java
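ArithmeticProgression above stores (symbol, value, delta, limit), with getSteps() = (limit - value)/delta and skip(k) = value + k*delta. A small self-contained sketch of that step arithmetic follows (illustrative numbers only, not values taken from the Pi job):

```java
/** Minimal sketch of the step arithmetic used by ArithmeticProgression (value, delta, limit). */
public class ProgressionSketch {
  public static void main(String[] args) {
    // Example progression with symbol 'e': a decreasing sequence 20, 16, 12, 8, 4.
    long value = 20, delta = -4, limit = 0;

    long steps = (limit - value) / delta;   // same formula as getSteps(): (0 - 20)/(-4) = 5
    long afterTwo = value + 2 * delta;      // same formula as skip(2): 20 + 2*(-4) = 12

    System.out.println("steps=" + steps + ", skip(2)=" + afterTwo);
    // toString() of the class renders this as "e:value=20,delta=-4,limit=0",
    // which is the format ArithmeticProgression.valueOf(String) parses back.
  }
}
```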
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/LongLong.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.examples.pi.math; import java.math.BigInteger; /** Support 124-bit integer arithmetic. */ class LongLong { static final int BITS_PER_LONG = 62; static final int MID = BITS_PER_LONG >> 1; static final int SIZE = BITS_PER_LONG << 1; static final long FULL_MASK = (1L << BITS_PER_LONG) - 1; static final long LOWER_MASK = FULL_MASK >>> MID; static final long UPPER_MASK = LOWER_MASK << MID; private long d0; private long d1; /** Set the values. */ LongLong set(long d0, long d1) { this.d0 = d0; this.d1 = d1; return this; } /** And operation (&). */ long and(long mask) { return d0 & mask; } /** Shift right operation (<<). */ long shiftRight(int n) { return (d1 << (BITS_PER_LONG - n)) + (d0 >>> n); } /** Plus equal operation (+=). */ LongLong plusEqual(LongLong that) { this.d0 += that.d0; this.d1 += that.d1; return this; } /** Convert this to a BigInteger. */ BigInteger toBigInteger() { return BigInteger.valueOf(d1).shiftLeft(BITS_PER_LONG).add(BigInteger.valueOf(d0)); } /** {@inheritDoc} */ @Override public String toString() { final int remainder = BITS_PER_LONG % 4; return String.format("%x*2^%d + %016x", d1<<remainder, BITS_PER_LONG-remainder, d0); } /** Compute a*b and store the result to r. * @return r */ static LongLong multiplication(final LongLong r, final long a, final long b) { /* final long x0 = a & LOWER_MASK; final long x1 = (a & UPPER_MASK) >> MID; final long y0 = b & LOWER_MASK; final long y1 = (b & UPPER_MASK) >> MID; final long t = (x0 + x1)*(y0 + y1); final long u = (x0 - x1)*(y0 - y1); final long v = x1*y1; final long tmp = (t - u)>>>1; result.d0 = ((t + u)>>>1) - v + ((tmp << MID) & FULL_MASK);; result.d1 = v + (tmp >> MID); return result; */ final long a_lower = a & LOWER_MASK; final long a_upper = (a & UPPER_MASK) >> MID; final long b_lower = b & LOWER_MASK; final long b_upper = (b & UPPER_MASK) >> MID; final long tmp = a_lower*b_upper + a_upper*b_lower; r.d0 = a_lower*b_lower + ((tmp << MID) & FULL_MASK); r.d1 = a_upper*b_upper + (tmp >> MID); return r; } }
3,047
28.882353
88
java
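LongLong.multiplication above splits each 62-bit operand at MID = 31 bits and recombines the partial products into a two-digit result d1*2^62 + d0. Below is a standalone sketch of the same decomposition, validated against BigInteger; the constants mirror BITS_PER_LONG and MID above, and the helper names are mine.

```java
import java.math.BigInteger;

/** Minimal sketch of the 62-bit split multiplication used by LongLong, checked with BigInteger. */
public class SplitMultiplySketch {
  static final int BITS = 62, MID = BITS >> 1;      // 62-bit digits, split at 31 bits
  static final long FULL = (1L << BITS) - 1;
  static final long LOWER = FULL >>> MID;

  /** Multiplies a*b (both in [0, 2^62)) into {d0, d1} with value d1*2^62 + d0. */
  static long[] multiply(long a, long b) {
    long a0 = a & LOWER, a1 = a >>> MID;            // 31-bit halves of a
    long b0 = b & LOWER, b1 = b >>> MID;            // 31-bit halves of b
    long cross = a0 * b1 + a1 * b0;                 // middle partial products
    long d0 = a0 * b0 + ((cross << MID) & FULL);    // low digit (may exceed 2^62, as in LongLong)
    long d1 = a1 * b1 + (cross >>> MID);            // high digit
    return new long[] { d0, d1 };
  }

  public static void main(String[] args) {
    long a = (1L << 61) + 12345L, b = (1L << 60) + 67890L;
    long[] d = multiply(a, b);
    BigInteger viaSplit = BigInteger.valueOf(d[1]).shiftLeft(BITS).add(BigInteger.valueOf(d[0]));
    BigInteger direct = BigInteger.valueOf(a).multiply(BigInteger.valueOf(b));
    System.out.println(viaSplit.equals(direct));    // prints true
  }
}
```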
hadoop
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/Summation.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.examples.pi.math; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.examples.pi.Combinable; import org.apache.hadoop.examples.pi.Container; import org.apache.hadoop.examples.pi.Util; /** Represent the summation \sum \frac{2^e \mod n}{n}. */ public class Summation implements Container<Summation>, Combinable<Summation> { /** Variable n in the summation. */ public final ArithmeticProgression N; /** Variable e in the summation. */ public final ArithmeticProgression E; private Double value = null; /** Constructor */ public Summation(ArithmeticProgression N, ArithmeticProgression E) { if (N.getSteps() != E.getSteps()) { throw new IllegalArgumentException("N.getSteps() != E.getSteps()," + "\n N.getSteps()=" + N.getSteps() + ", N=" + N + "\n E.getSteps()=" + E.getSteps() + ", E=" + E); } this.N = N; this.E = E; } /** Constructor */ Summation(long valueN, long deltaN, long valueE, long deltaE, long limitE) { this(valueN, deltaN, valueN - deltaN*((valueE - limitE)/deltaE), valueE, deltaE, limitE); } /** Constructor */ Summation(long valueN, long deltaN, long limitN, long valueE, long deltaE, long limitE) { this(new ArithmeticProgression('n', valueN, deltaN, limitN), new ArithmeticProgression('e', valueE, deltaE, limitE)); } /** {@inheritDoc} */ @Override public Summation getElement() {return this;} /** Return the number of steps of this summation */ long getSteps() {return E.getSteps();} /** Return the value of this summation */ public Double getValue() {return value;} /** Set the value of this summation */ public void setValue(double v) {this.value = v;} /** {@inheritDoc} */ @Override public String toString() { return "[" + N + "; " + E + (value == null? "]": "]value=" + Double.doubleToLongBits(value)); } /** {@inheritDoc} */ @Override public boolean equals(Object obj) { if (obj == this) return true; if (obj != null && obj instanceof Summation) { final Summation that = (Summation)obj; return this.N.equals(that.N) && this.E.equals(that.E); } throw new IllegalArgumentException(obj == null? "obj == null": "obj.getClass()=" + obj.getClass()); } /** Not supported */ @Override public int hashCode() { throw new UnsupportedOperationException(); } /** Covert a String to a Summation. 
*/ public static Summation valueOf(final String s) { int i = 1; int j = s.indexOf("; ", i); if (j < 0) throw new IllegalArgumentException("i=" + i + ", j=" + j + " < 0, s=" + s); final ArithmeticProgression N = ArithmeticProgression.valueOf(s.substring(i, j)); i = j + 2; j = s.indexOf("]", i); if (j < 0) throw new IllegalArgumentException("i=" + i + ", j=" + j + " < 0, s=" + s); final ArithmeticProgression E = ArithmeticProgression.valueOf(s.substring(i, j)); final Summation sigma = new Summation(N, E); i = j + 1; if (s.length() > i) { final String value = Util.parseStringVariable("value", s.substring(i)); sigma.setValue(value.indexOf('.') < 0? Double.longBitsToDouble(Long.parseLong(value)): Double.parseDouble(value)); } return sigma; } /** Compute the value of the summation. */ public double compute() { if (value == null) value = N.limit <= MAX_MODULAR? compute_modular(): compute_montgomery(); return value; } private static final long MAX_MODULAR = 1L << 32; /** Compute the value using {@link Modular#mod(long, long)}. */ double compute_modular() { long e = E.value; long n = N.value; double s = 0; for(; e > E.limit; e += E.delta) { s = Modular.addMod(s, Modular.mod(e, n)/(double)n); n += N.delta; } return s; } final Montgomery montgomery = new Montgomery(); /** Compute the value using {@link Montgomery#mod(long)}. */ double compute_montgomery() { long e = E.value; long n = N.value; double s = 0; for(; e > E.limit; e += E.delta) { s = Modular.addMod(s, montgomery.set(n).mod(e)/(double)n); n += N.delta; } return s; } /** {@inheritDoc} */ @Override public int compareTo(Summation that) { final int de = this.E.compareTo(that.E); if (de != 0) return de; return this.N.compareTo(that.N); } /** {@inheritDoc} */ @Override public Summation combine(Summation that) { if (this.N.delta != that.N.delta || this.E.delta != that.E.delta) throw new IllegalArgumentException( "this.N.delta != that.N.delta || this.E.delta != that.E.delta" + ",\n this=" + this + ",\n that=" + that); if (this.E.limit == that.E.value && this.N.limit == that.N.value) { final double v = Modular.addMod(this.value, that.value); final Summation s = new Summation( new ArithmeticProgression(N.symbol, N.value, N.delta, that.N.limit), new ArithmeticProgression(E.symbol, E.value, E.delta, that.E.limit)); s.setValue(v); return s; } return null; } /** Find the remaining terms. */ public <T extends Container<Summation>> List<Summation> remainingTerms(List<T> sorted) { final List<Summation> results = new ArrayList<Summation>(); Summation remaining = this; if (sorted != null) for(Container<Summation> c : sorted) { final Summation sigma = c.getElement(); if (!remaining.contains(sigma)) throw new IllegalArgumentException("!remaining.contains(s)," + "\n remaining = " + remaining + "\n s = " + sigma + "\n this = " + this + "\n sorted = " + sorted); final Summation s = new Summation(sigma.N.limit, N.delta, remaining.N.limit, sigma.E.limit, E.delta, remaining.E.limit); if (s.getSteps() > 0) results.add(s); remaining = new Summation(remaining.N.value, N.delta, sigma.N.value, remaining.E.value, E.delta, sigma.E.value); } if (remaining.getSteps() > 0) results.add(remaining); return results; } /** Does this contains that? */ public boolean contains(Summation that) { return this.N.contains(that.N) && this.E.contains(that.E); } /** Partition the summation. 
*/ public Summation[] partition(final int nParts) { final Summation[] parts = new Summation[nParts]; final long steps = (E.limit - E.value)/E.delta + 1; long prevN = N.value; long prevE = E.value; for(int i = 1; i < parts.length; i++) { final long k = (i * steps)/parts.length; final long currN = N.skip(k); final long currE = E.skip(k); parts[i - 1] = new Summation( new ArithmeticProgression(N.symbol, prevN, N.delta, currN), new ArithmeticProgression(E.symbol, prevE, E.delta, currE)); prevN = currN; prevE = currE; } parts[parts.length - 1] = new Summation( new ArithmeticProgression(N.symbol, prevN, N.delta, N.limit), new ArithmeticProgression(E.symbol, prevE, E.delta, E.limit)); return parts; } }
8,094
32.450413
98
java
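Summation above walks the n and e progressions in lockstep and accumulates the fractional part of \sum (2^e mod n)/n, which is what compute_modular() does. Here is a self-contained sketch of that loop, using BigInteger.modPow in place of Modular.mod and arbitrary small illustrative progressions:

```java
import java.math.BigInteger;

/** Minimal sketch of Summation.compute_modular(): accumulate frac(sum of (2^e mod n)/n). */
public class SummationSketch {
  /** Keeps x + a in [0, 1), as Modular.addMod does. */
  static double addMod(double x, double a) {
    x += a;
    return x >= 1 ? x - 1 : x < 0 ? x + 1 : x;
  }

  public static void main(String[] args) {
    // Illustrative progressions: n = 3, 7, 11, ... and e = 40, 30, 20, 10 (stops at limitE = 0).
    long n = 3, deltaN = 4;
    long e = 40, deltaE = -10, limitE = 0;

    double s = 0;
    for (; e > limitE; e += deltaE) {
      long mod = BigInteger.valueOf(2)
          .modPow(BigInteger.valueOf(e), BigInteger.valueOf(n)).longValue();
      s = addMod(s, mod / (double) n);   // add the fractional contribution of this term
      n += deltaN;
    }
    System.out.println("fractional sum = " + s);
  }
}
```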
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs; import java.nio.ByteBuffer; import java.util.Arrays; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.request.CREATE3Request; import org.apache.hadoop.nfs.nfs3.request.SetAttr3; import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; import org.apache.hadoop.oncrpc.RegistrationClient; import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcReply; import org.apache.hadoop.oncrpc.RpcUtil; import org.apache.hadoop.oncrpc.SimpleTcpClient; import org.apache.hadoop.oncrpc.SimpleTcpClientHandler; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.CredentialsNone; import org.apache.hadoop.oncrpc.security.VerifierNone; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelHandlerContext; import org.jboss.netty.channel.ChannelPipeline; import org.jboss.netty.channel.ChannelPipelineFactory; import org.jboss.netty.channel.Channels; import org.jboss.netty.channel.MessageEvent; public class TestOutOfOrderWrite { public final static Log LOG = LogFactory.getLog(TestOutOfOrderWrite.class); static FileHandle handle = null; static Channel channel; static byte[] data1 = new byte[1000]; static byte[] data2 = new byte[1000]; static byte[] data3 = new byte[1000]; static XDR create() { XDR request = new XDR(); RpcCall.getInstance(0x8000004c, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION, Nfs3Constant.NFSPROC3.CREATE.getValue(), new CredentialsNone(), new VerifierNone()).write(request); SetAttr3 objAttr = new SetAttr3(); CREATE3Request createReq = new CREATE3Request(new FileHandle("/"), "out-of-order-write" + System.currentTimeMillis(), 0, objAttr, 0); createReq.serialize(request); return request; } static XDR write(FileHandle handle, int xid, long offset, int count, byte[] data) { XDR request = new XDR(); RpcCall.getInstance(xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION, Nfs3Constant.NFSPROC3.CREATE.getValue(), new CredentialsNone(), new VerifierNone()).write(request); WRITE3Request write1 = new WRITE3Request(handle, offset, count, WriteStableHow.UNSTABLE, ByteBuffer.wrap(data)); write1.serialize(request); return request; } static void testRequest(XDR request) { RegistrationClient registrationClient = new RegistrationClient("localhost", Nfs3Constant.SUN_RPCBIND, 
request); registrationClient.run(); } static class WriteHandler extends SimpleTcpClientHandler { public WriteHandler(XDR request) { super(request); } @Override public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) { // Get handle from create response ChannelBuffer buf = (ChannelBuffer) e.getMessage(); XDR rsp = new XDR(buf.array()); if (rsp.getBytes().length == 0) { LOG.info("rsp length is zero, why?"); return; } LOG.info("rsp length=" + rsp.getBytes().length); RpcReply reply = RpcReply.read(rsp); int xid = reply.getXid(); // Only process the create response if (xid != 0x8000004c) { return; } int status = rsp.readInt(); if (status != Nfs3Status.NFS3_OK) { LOG.error("Create failed, status =" + status); return; } LOG.info("Create succeeded"); rsp.readBoolean(); // value follow handle = new FileHandle(); handle.deserialize(rsp); channel = e.getChannel(); } } static class WriteClient extends SimpleTcpClient { public WriteClient(String host, int port, XDR request, Boolean oneShot) { super(host, port, request, oneShot); } @Override protected ChannelPipelineFactory setPipelineFactory() { this.pipelineFactory = new ChannelPipelineFactory() { @Override public ChannelPipeline getPipeline() { return Channels.pipeline( RpcUtil.constructRpcFrameDecoder(), new WriteHandler(request)); } }; return this.pipelineFactory; } } public static void main(String[] args) throws InterruptedException { Arrays.fill(data1, (byte) 7); Arrays.fill(data2, (byte) 8); Arrays.fill(data3, (byte) 9); // NFS3 Create request NfsConfiguration conf = new NfsConfiguration(); WriteClient client = new WriteClient("localhost", conf.getInt( NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT), create(), false); client.run(); while (handle == null) { Thread.sleep(1000); System.out.println("handle is still null..."); } LOG.info("Send write1 request"); XDR writeReq; writeReq = write(handle, 0x8000005c, 2000, 1000, data3); Nfs3Utils.writeChannel(channel, writeReq, 1); writeReq = write(handle, 0x8000005d, 1000, 1000, data2); Nfs3Utils.writeChannel(channel, writeReq, 2); writeReq = write(handle, 0x8000005e, 0, 1000, data1); Nfs3Utils.writeChannel(channel, writeReq, 3); // TODO: convert to Junit test, and validate result automatically } }
6,402
34.181319
79
java
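TestOutOfOrderWrite above issues three UNSTABLE WRITE requests at offsets 2000, 1000 and 0, in that order, so the NFS gateway must hold early arrivals until the gap in front of them is filled. The sketch below only illustrates that reordering idea with a TreeMap keyed by offset; it is not the gateway's actual write manager.

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

/** Illustration only: buffer out-of-order writes by offset and flush them once contiguous. */
public class OutOfOrderWriteSketch {
  private final TreeMap<Long, byte[]> pending = new TreeMap<>();   // offset -> data
  private long nextOffset = 0;                                     // next byte expected in order
  private final ByteArrayOutputStream file = new ByteArrayOutputStream();

  void write(long offset, byte[] data) throws IOException {
    pending.put(offset, data);
    // Drain every buffered write that now lines up with the end of the file.
    Map.Entry<Long, byte[]> head;
    while ((head = pending.firstEntry()) != null && head.getKey() == nextOffset) {
      file.write(head.getValue());
      nextOffset += head.getValue().length;
      pending.pollFirstEntry();
    }
  }

  public static void main(String[] args) throws IOException {
    OutOfOrderWriteSketch sketch = new OutOfOrderWriteSketch();
    sketch.write(2000, new byte[1000]);   // arrives first, must wait
    sketch.write(1000, new byte[1000]);   // still waiting for [0, 1000)
    sketch.write(0, new byte[1000]);      // fills the gap; all three flush in order
    System.out.println("bytes written in order: " + sketch.file.size());   // 3000
  }
}
```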
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs; import java.io.IOException; import java.net.DatagramPacket; import java.net.DatagramSocket; import java.net.InetAddress; import java.net.UnknownHostException; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.CredentialsNone; import org.apache.hadoop.oncrpc.security.VerifierNone; // TODO: convert this to Junit public class TestUdpServer { static void testRequest(XDR request, XDR request2) { try { DatagramSocket clientSocket = new DatagramSocket(); InetAddress IPAddress = InetAddress.getByName("localhost"); byte[] sendData = request.getBytes(); byte[] receiveData = new byte[65535]; DatagramPacket sendPacket = new DatagramPacket(sendData, sendData.length, IPAddress, Nfs3Constant.SUN_RPCBIND); clientSocket.send(sendPacket); DatagramPacket receivePacket = new DatagramPacket(receiveData, receiveData.length); clientSocket.receive(receivePacket); clientSocket.close(); } catch (UnknownHostException e) { System.err.println("Don't know about host: localhost."); System.exit(1); } catch (IOException e) { System.err.println("Couldn't get I/O for " + "the connection to: localhost."); System.exit(1); } } public static void main(String[] args) throws InterruptedException { Thread t1 = new Runtest1(); // TODO: cleanup //Thread t2 = new Runtest2(); t1.start(); //t2.start(); t1.join(); //t2.join(); //testDump(); } static class Runtest1 extends Thread { @Override public void run() { testGetportMount(); } } static class Runtest2 extends Thread { @Override public void run() { testDump(); } } static void createPortmapXDRheader(XDR xdr_out, int procedure) { // Make this a method RpcCall.getInstance(0, 100000, 2, procedure, new CredentialsNone(), new VerifierNone()).write(xdr_out); } static void testGetportMount() { XDR xdr_out = new XDR(); createPortmapXDRheader(xdr_out, 3); xdr_out.writeInt(100005); xdr_out.writeInt(1); xdr_out.writeInt(6); xdr_out.writeInt(0); XDR request2 = new XDR(); createPortmapXDRheader(xdr_out, 3); request2.writeInt(100005); request2.writeInt(1); request2.writeInt(6); request2.writeInt(0); testRequest(xdr_out, request2); } static void testGetport() { XDR xdr_out = new XDR(); createPortmapXDRheader(xdr_out, 3); xdr_out.writeInt(100003); xdr_out.writeInt(3); xdr_out.writeInt(6); xdr_out.writeInt(0); XDR request2 = new XDR(); createPortmapXDRheader(xdr_out, 3); request2.writeInt(100003); request2.writeInt(3); request2.writeInt(6); request2.writeInt(0); testRequest(xdr_out, request2); } static void testDump() { XDR xdr_out = new XDR(); createPortmapXDRheader(xdr_out, 4); testRequest(xdr_out, xdr_out); } }
3,895
27.859259
79
java
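The four integers written after the portmap header in testGetportMount and testGetport are the standard PMAPPROC_GETPORT arguments (program, version, protocol, port). The constant names below are annotation based on the values used above and the well-known ONC RPC program numbers; they are not Hadoop API.

```java
/** Named view of the PMAPPROC_GETPORT arguments written in testGetportMount/testGetport. */
public class GetportArgs {
  static final int PMAP_PROGRAM = 100000;   // portmapper program number (2nd arg of RpcCall.getInstance)
  static final int PMAP_VERSION = 2;        // portmapper version (3rd arg)
  static final int PMAPPROC_GETPORT = 3;    // procedure passed to createPortmapXDRheader

  static final int MOUNTD_PROGRAM = 100005; // program queried in testGetportMount
  static final int NFS_PROGRAM = 100003;    // program queried in testGetport
  static final int IPPROTO_TCP = 6;         // protocol argument
  static final int ANY_PORT = 0;            // port argument is ignored for GETPORT
}
```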
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs; import java.io.IOException; import java.net.InetAddress; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd; import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3; import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3; import org.apache.hadoop.oncrpc.XDR; import org.junit.Test; public class TestMountd { public static final Log LOG = LogFactory.getLog(TestMountd.class); @Test public void testStart() throws IOException { // Start minicluster NfsConfiguration config = new NfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1) .build(); cluster.waitActive(); // Use emphral port in case tests are running in parallel config.setInt("nfs3.mountd.port", 0); config.setInt("nfs3.server.port", 0); // Start nfs Nfs3 nfs3 = new Nfs3(config); nfs3.startServiceInternal(false); RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd() .getRpcProgram(); mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost")); RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram(); nfsd.nullProcedure(); cluster.shutdown(); } }
2,206
33.484375
79
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.io.EOFException; import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.util.EnumSet; import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; import org.apache.hadoop.crypto.key.KeyProviderFactory; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.client.HdfsAdmin; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.request.ACCESS3Request; import org.apache.hadoop.nfs.nfs3.request.COMMIT3Request; import org.apache.hadoop.nfs.nfs3.request.CREATE3Request; import org.apache.hadoop.nfs.nfs3.request.FSINFO3Request; import org.apache.hadoop.nfs.nfs3.request.FSSTAT3Request; import org.apache.hadoop.nfs.nfs3.request.GETATTR3Request; import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request; import org.apache.hadoop.nfs.nfs3.request.MKDIR3Request; import org.apache.hadoop.nfs.nfs3.request.PATHCONF3Request; import org.apache.hadoop.nfs.nfs3.request.READ3Request; import org.apache.hadoop.nfs.nfs3.request.READDIR3Request; import org.apache.hadoop.nfs.nfs3.request.READDIRPLUS3Request; import org.apache.hadoop.nfs.nfs3.request.READLINK3Request; import org.apache.hadoop.nfs.nfs3.request.REMOVE3Request; import org.apache.hadoop.nfs.nfs3.request.RENAME3Request; import org.apache.hadoop.nfs.nfs3.request.RMDIR3Request; import org.apache.hadoop.nfs.nfs3.request.SETATTR3Request; import org.apache.hadoop.nfs.nfs3.request.SYMLINK3Request; import org.apache.hadoop.nfs.nfs3.request.SetAttr3; import org.apache.hadoop.nfs.nfs3.request.SetAttr3.SetAttrField; import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; import org.apache.hadoop.nfs.nfs3.response.ACCESS3Response; import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response; import org.apache.hadoop.nfs.nfs3.response.CREATE3Response; import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response; 
import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response; import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response; import org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response; import org.apache.hadoop.nfs.nfs3.response.MKDIR3Response; import org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response; import org.apache.hadoop.nfs.nfs3.response.READ3Response; import org.apache.hadoop.nfs.nfs3.response.READDIR3Response; import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response; import org.apache.hadoop.nfs.nfs3.response.READLINK3Response; import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response; import org.apache.hadoop.nfs.nfs3.response.RENAME3Response; import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response; import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response; import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response; import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.SecurityHandler; import org.apache.hadoop.security.IdMappingConstant; import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; import org.apache.hadoop.security.authorize.ProxyUsers; import org.jboss.netty.channel.Channel; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; /** * Tests for {@link RpcProgramNfs3} */ public class TestRpcProgramNfs3 { static DistributedFileSystem hdfs; static MiniDFSCluster cluster = null; static NfsConfiguration config = new NfsConfiguration(); static HdfsAdmin dfsAdmin; static NameNode nn; static Nfs3 nfs; static RpcProgramNfs3 nfsd; static SecurityHandler securityHandler; static SecurityHandler securityHandlerUnpriviledged; static String testdir = "/tmp"; private static final String TEST_KEY = "test_key"; private static FileSystemTestHelper fsHelper; private static File testRootDir; @BeforeClass public static void setup() throws Exception { String currentUser = System.getProperty("user.name"); config.set("fs.permissions.umask-mode", "u=rwx,g=,o="); config.set(DefaultImpersonationProvider.getTestProvider() .getProxySuperuserGroupConfKey(currentUser), "*"); config.set(DefaultImpersonationProvider.getTestProvider() .getProxySuperuserIpConfKey(currentUser), "*"); fsHelper = new FileSystemTestHelper(); // Set up java key store String testRoot = fsHelper.getTestRootDir(); testRootDir = new File(testRoot).getAbsoluteFile(); final Path jksPath = new Path(testRootDir.toString(), "test.jks"); config.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()); ProxyUsers.refreshSuperUserGroupsConfiguration(config); cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build(); cluster.waitActive(); hdfs = cluster.getFileSystem(); nn = cluster.getNameNode(); dfsAdmin = new HdfsAdmin(cluster.getURI(), config); // Use ephemeral ports in case tests are running in parallel config.setInt("nfs3.mountd.port", 0); config.setInt("nfs3.server.port", 0); // Start NFS with allowed.hosts set to "* rw" config.set("dfs.nfs.exports.allowed.hosts", "* rw"); nfs = new Nfs3(config); nfs.startServiceInternal(false); nfsd = (RpcProgramNfs3) nfs.getRpcProgram(); hdfs.getClient().setKeyProvider(nn.getNamesystem().getProvider()); DFSTestUtil.createKey(TEST_KEY, cluster, config); // Mock SecurityHandler which returns system user.name securityHandler = Mockito.mock(SecurityHandler.class); 
Mockito.when(securityHandler.getUser()).thenReturn(currentUser); // Mock SecurityHandler which returns a dummy username "harry" securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class); Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry"); } @AfterClass public static void shutdown() throws Exception { if (cluster != null) { cluster.shutdown(); } } @Before public void createFiles() throws IllegalArgumentException, IOException { hdfs.delete(new Path(testdir), true); hdfs.mkdirs(new Path(testdir)); hdfs.mkdirs(new Path(testdir + "/foo")); DFSTestUtil.createFile(hdfs, new Path(testdir + "/bar"), 0, (short) 1, 0); } @Test(timeout = 60000) public void testGetattr() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); GETATTR3Request req = new GETATTR3Request(handle); req.serialize(xdr_req); // Attempt by an unpriviledged user should fail. GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. GETATTR3Response response2 = nfsd.getattr(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testSetattr() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); SetAttr3 symAttr = new SetAttr3(0, 1, 0, 0, null, null, EnumSet.of(SetAttrField.UID)); SETATTR3Request req = new SETATTR3Request(handle, symAttr, false, null); req.serialize(xdr_req); // Attempt by an unprivileged user should fail. SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. SETATTR3Response response2 = nfsd.setattr(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testLookup() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); LOOKUP3Request lookupReq = new LOOKUP3Request(handle, "bar"); XDR xdr_req = new XDR(); lookupReq.serialize(xdr_req); // Attempt by an unpriviledged user should fail. LOOKUP3Response response1 = nfsd.lookup(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. 
LOOKUP3Response response2 = nfsd.lookup(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testAccess() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); ACCESS3Request req = new ACCESS3Request(handle); req.serialize(xdr_req); // Attempt by an unpriviledged user should fail. ACCESS3Response response1 = nfsd.access(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. ACCESS3Response response2 = nfsd.access(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testReadlink() throws Exception { // Create a symlink first. HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(), "bar"); req.serialize(xdr_req); SYMLINK3Response response = nfsd.symlink(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response.getStatus()); // Now perform readlink operations. FileHandle handle2 = response.getObjFileHandle(); XDR xdr_req2 = new XDR(); READLINK3Request req2 = new READLINK3Request(handle2); req2.serialize(xdr_req2); // Attempt by an unpriviledged user should fail. READLINK3Response response1 = nfsd.readlink(xdr_req2.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. READLINK3Response response2 = nfsd.readlink(xdr_req2.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testRead() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); READ3Request readReq = new READ3Request(handle, 0, 5); XDR xdr_req = new XDR(); readReq.serialize(xdr_req); // Attempt by an unpriviledged user should fail. READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. 
READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 120000) public void testEncryptedReadWrite() throws Exception { final int len = 8192; final Path zone = new Path("/zone"); hdfs.mkdirs(zone); dfsAdmin.createEncryptionZone(zone, TEST_KEY); final byte[] buffer = new byte[len]; for (int i = 0; i < len; i++) { buffer[i] = (byte) i; } final String encFile1 = "/zone/myfile"; createFileUsingNfs(encFile1, buffer); commit(encFile1, len); assertArrayEquals("encFile1 not equal", getFileContentsUsingNfs(encFile1, len), getFileContentsUsingDfs(encFile1, len)); /* * Same thing except this time create the encrypted file using DFS. */ final String encFile2 = "/zone/myfile2"; final Path encFile2Path = new Path(encFile2); DFSTestUtil.createFile(hdfs, encFile2Path, len, (short) 1, 0xFEED); assertArrayEquals("encFile2 not equal", getFileContentsUsingNfs(encFile2, len), getFileContentsUsingDfs(encFile2, len)); } private void createFileUsingNfs(String fileName, byte[] buffer) throws Exception { DFSTestUtil.createFile(hdfs, new Path(fileName), 0, (short) 1, 0); final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName); final long dirId = status.getFileId(); final FileHandle handle = new FileHandle(dirId); final WRITE3Request writeReq = new WRITE3Request(handle, 0, buffer.length, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer)); final XDR xdr_req = new XDR(); writeReq.serialize(xdr_req); final WRITE3Response response = nfsd.write(xdr_req.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect response: ", null, response); } private byte[] getFileContentsUsingNfs(String fileName, int len) throws Exception { final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName); final long dirId = status.getFileId(); final FileHandle handle = new FileHandle(dirId); final READ3Request readReq = new READ3Request(handle, 0, len); final XDR xdr_req = new XDR(); readReq.serialize(xdr_req); final READ3Response response = nfsd.read(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code: ", Nfs3Status.NFS3_OK, response.getStatus()); assertTrue("expected full read", response.isEof()); return response.getData().array(); } private byte[] getFileContentsUsingDfs(String fileName, int len) throws Exception { final FSDataInputStream in = hdfs.open(new Path(fileName)); final byte[] ret = new byte[len]; in.readFully(ret); try { in.readByte(); Assert.fail("expected end of file"); } catch (EOFException e) { // expected. 
Unfortunately there is no associated message to check } in.close(); return ret; } private void commit(String fileName, int len) throws Exception { final HdfsFileStatus status = nn.getRpcServer().getFileInfo(fileName); final long dirId = status.getFileId(); final FileHandle handle = new FileHandle(dirId); final XDR xdr_req = new XDR(); final COMMIT3Request req = new COMMIT3Request(handle, 0, len); req.serialize(xdr_req); Channel ch = Mockito.mock(Channel.class); COMMIT3Response response2 = nfsd.commit(xdr_req.asReadOnlyWrap(), ch, 1, securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect COMMIT3Response:", null, response2); } @Test(timeout = 60000) public void testWrite() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); byte[] buffer = new byte[10]; for (int i = 0; i < 10; i++) { buffer[i] = (byte) i; } WRITE3Request writeReq = new WRITE3Request(handle, 0, 10, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer)); XDR xdr_req = new XDR(); writeReq.serialize(xdr_req); // Attempt by an unpriviledged user should fail. WRITE3Response response1 = nfsd.write(xdr_req.asReadOnlyWrap(), null, 1, securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. WRITE3Response response2 = nfsd.write(xdr_req.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect response:", null, response2); } @Test(timeout = 60000) public void testCreate() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); CREATE3Request req = new CREATE3Request(handle, "fubar", Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0); req.serialize(xdr_req); // Attempt by an unpriviledged user should fail. CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. CREATE3Response response2 = nfsd.create(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testMkdir() throws Exception {//FixME HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); MKDIR3Request req = new MKDIR3Request(handle, "fubar1", new SetAttr3()); req.serialize(xdr_req); // Attempt to mkdir by an unprivileged user should fail. MKDIR3Response response1 = nfsd.mkdir(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); XDR xdr_req2 = new XDR(); MKDIR3Request req2 = new MKDIR3Request(handle, "fubar2", new SetAttr3()); req2.serialize(xdr_req2); // Attempt to mkdir by a privileged user should pass. 
MKDIR3Response response2 = nfsd.mkdir(xdr_req2.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testSymlink() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(), "bar"); req.serialize(xdr_req); // Attempt by an unprivileged user should fail. SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a privileged user should pass. SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testRemove() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); REMOVE3Request req = new REMOVE3Request(handle, "bar"); req.serialize(xdr_req); // Attempt by an unpriviledged user should fail. REMOVE3Response response1 = nfsd.remove(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. REMOVE3Response response2 = nfsd.remove(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testRmdir() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); RMDIR3Request req = new RMDIR3Request(handle, "foo"); req.serialize(xdr_req); // Attempt by an unprivileged user should fail. RMDIR3Response response1 = nfsd.rmdir(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a privileged user should pass. RMDIR3Response response2 = nfsd.rmdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testRename() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); RENAME3Request req = new RENAME3Request(handle, "bar", handle, "fubar"); req.serialize(xdr_req); // Attempt by an unprivileged user should fail. RENAME3Response response1 = nfsd.rename(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a privileged user should pass. 
RENAME3Response response2 = nfsd.rename(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testReaddir() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); READDIR3Request req = new READDIR3Request(handle, 0, 0, 100); req.serialize(xdr_req); // Attempt by an unpriviledged user should fail. READDIR3Response response1 = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. READDIR3Response response2 = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testReaddirplus() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); READDIRPLUS3Request req = new READDIRPLUS3Request(handle, 0, 0, 3, 2); req.serialize(xdr_req); // Attempt by an unprivileged user should fail. READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a privileged user should pass. READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testFsstat() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); FSSTAT3Request req = new FSSTAT3Request(handle); req.serialize(xdr_req); // Attempt by an unpriviledged user should fail. FSSTAT3Response response1 = nfsd.fsstat(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. FSSTAT3Response response2 = nfsd.fsstat(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testFsinfo() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); FSINFO3Request req = new FSINFO3Request(handle); req.serialize(xdr_req); // Attempt by an unpriviledged user should fail. FSINFO3Response response1 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. 
FSINFO3Response response2 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testPathconf() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); PATHCONF3Request req = new PATHCONF3Request(handle); req.serialize(xdr_req); // Attempt by an unpriviledged user should fail. PATHCONF3Response response1 = nfsd.pathconf(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. PATHCONF3Response response2 = nfsd.pathconf(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, response2.getStatus()); } @Test(timeout = 60000) public void testCommit() throws Exception { HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar"); long dirId = status.getFileId(); FileHandle handle = new FileHandle(dirId); XDR xdr_req = new XDR(); COMMIT3Request req = new COMMIT3Request(handle, 0, 5); req.serialize(xdr_req); Channel ch = Mockito.mock(Channel.class); // Attempt by an unpriviledged user should fail. COMMIT3Response response1 = nfsd.commit(xdr_req.asReadOnlyWrap(), ch, 1, securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, response1.getStatus()); // Attempt by a priviledged user should pass. COMMIT3Response response2 = nfsd.commit(xdr_req.asReadOnlyWrap(), ch, 1, securityHandler, new InetSocketAddress("localhost", 1234)); assertEquals("Incorrect COMMIT3Response:", null, response2); } @Test(timeout=1000) public void testIdempotent() { Object[][] procedures = { { Nfs3Constant.NFSPROC3.NULL, 1 }, { Nfs3Constant.NFSPROC3.GETATTR, 1 }, { Nfs3Constant.NFSPROC3.SETATTR, 1 }, { Nfs3Constant.NFSPROC3.LOOKUP, 1 }, { Nfs3Constant.NFSPROC3.ACCESS, 1 }, { Nfs3Constant.NFSPROC3.READLINK, 1 }, { Nfs3Constant.NFSPROC3.READ, 1 }, { Nfs3Constant.NFSPROC3.WRITE, 1 }, { Nfs3Constant.NFSPROC3.CREATE, 0 }, { Nfs3Constant.NFSPROC3.MKDIR, 0 }, { Nfs3Constant.NFSPROC3.SYMLINK, 0 }, { Nfs3Constant.NFSPROC3.MKNOD, 0 }, { Nfs3Constant.NFSPROC3.REMOVE, 0 }, { Nfs3Constant.NFSPROC3.RMDIR, 0 }, { Nfs3Constant.NFSPROC3.RENAME, 0 }, { Nfs3Constant.NFSPROC3.LINK, 0 }, { Nfs3Constant.NFSPROC3.READDIR, 1 }, { Nfs3Constant.NFSPROC3.READDIRPLUS, 1 }, { Nfs3Constant.NFSPROC3.FSSTAT, 1 }, { Nfs3Constant.NFSPROC3.FSINFO, 1 }, { Nfs3Constant.NFSPROC3.PATHCONF, 1 }, { Nfs3Constant.NFSPROC3.COMMIT, 1 } }; for (Object[] procedure : procedures) { boolean idempotent = procedure[1].equals(Integer.valueOf(1)); Nfs3Constant.NFSPROC3 proc = (Nfs3Constant.NFSPROC3)procedure[0]; if (idempotent) { Assert.assertTrue(("Procedure " + proc + " should be idempotent"), proc.isIdempotent()); } else { Assert.assertFalse(("Procedure " + proc + " should be non-idempotent"), proc.isIdempotent()); } } } @Test public void testDeprecatedKeys() { NfsConfiguration conf = new NfsConfiguration(); conf.setInt("nfs3.server.port", 998); assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, 0) == 998); conf.setInt("nfs3.mountd.port", 999); assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY, 0) == 999); 
conf.set("dfs.nfs.exports.allowed.hosts", "host1"); assertTrue(conf.get(CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY) .equals("host1")); conf.setInt("dfs.nfs.exports.cache.expirytime.millis", 1000); assertTrue(conf.getInt( Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY, 0) == 1000); conf.setInt("hadoop.nfs.userupdate.milly", 10); assertTrue(conf.getInt(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY, 0) == 10); conf.set("dfs.nfs3.dump.dir", "/nfs/tmp"); assertTrue(conf.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY).equals( "/nfs/tmp")); conf.setBoolean("dfs.nfs3.enableDump", false); assertTrue(conf.getBoolean(NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY, true) == false); conf.setInt("dfs.nfs3.max.open.files", 500); assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 0) == 500); conf.setInt("dfs.nfs3.stream.timeout", 6000); assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY, 0) == 6000); conf.set("dfs.nfs3.export.point", "/dir1"); assertTrue(conf.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY).equals("/dir1")); } }
33,708
39.859394
86
java
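The TestRpcProgramNfs3 cases above repeat one pattern for nearly every NFSv3 procedure: serialize a request into XDR, replay it against RpcProgramNfs3 once with an unprivileged SecurityHandler (expecting NFS3ERR_ACCES) and once with a privileged one (expecting NFS3_OK). Below is a minimal sketch of that pattern for the READ procedure. It is illustrative only, reuses only calls that appear in the tests above, and assumes READ enforces the same access check the WRITE/CREATE cases assert; the class and method names are hypothetical, not part of the Hadoop test suite.

import static org.junit.Assert.assertEquals;

import java.net.InetSocketAddress;

import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.request.READ3Request;
import org.apache.hadoop.nfs.nfs3.response.READ3Response;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.SecurityHandler;

/** Hypothetical helper sketch; not part of the Hadoop test suite. */
final class ReadAccessCheckSketch {
  private ReadAccessCheckSketch() {
  }

  /**
   * Replays the same serialized READ request with two security handlers and
   * asserts the unprivileged attempt is rejected while the privileged one
   * succeeds, mirroring the per-procedure pattern used above.
   */
  static void assertReadAccessControl(RpcProgramNfs3 nfsd, FileHandle handle,
      SecurityHandler unprivileged, SecurityHandler privileged) {
    READ3Request readReq = new READ3Request(handle, 0, 5);
    XDR xdr = new XDR();
    readReq.serialize(xdr);
    InetSocketAddress client = new InetSocketAddress("localhost", 1234);

    READ3Response denied = nfsd.read(xdr.asReadOnlyWrap(), unprivileged, client);
    assertEquals("Unprivileged read should be rejected",
        Nfs3Status.NFS3ERR_ACCES, denied.getStatus());

    READ3Response allowed = nfsd.read(xdr.asReadOnlyWrap(), privileged, client);
    assertEquals("Privileged read should succeed",
        Nfs3Status.NFS3_OK, allowed.getStatus());
  }
}

A test could invoke ReadAccessCheckSketch.assertReadAccessControl(nfsd, handle, securityHandlerUnpriviledged, securityHandler) after the usual MiniDFSCluster and file setup shown in the tests above.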
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestClientAccessPrivilege.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import static org.junit.Assert.assertEquals; import java.io.IOException; import java.net.InetSocketAddress; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.SecurityHandler; import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; import org.apache.hadoop.security.authorize.ProxyUsers; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; public class TestClientAccessPrivilege { static MiniDFSCluster cluster = null; static NfsConfiguration config = new NfsConfiguration(); static DistributedFileSystem hdfs; static NameNode nn; static String testdir = "/tmp"; static SecurityHandler securityHandler; @BeforeClass public static void setup() throws Exception { String currentUser = System.getProperty("user.name"); config.set(DefaultImpersonationProvider.getTestProvider() .getProxySuperuserGroupConfKey(currentUser), "*"); config.set(DefaultImpersonationProvider.getTestProvider() .getProxySuperuserIpConfKey(currentUser), "*"); ProxyUsers.refreshSuperUserGroupsConfiguration(config); cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build(); cluster.waitActive(); hdfs = cluster.getFileSystem(); nn = cluster.getNameNode(); // Use ephemeral port in case tests are running in parallel config.setInt("nfs3.mountd.port", 0); config.setInt("nfs3.server.port", 0); securityHandler = Mockito.mock(SecurityHandler.class); Mockito.when(securityHandler.getUser()).thenReturn( System.getProperty("user.name")); } @AfterClass public static void shutdown() throws Exception { if (cluster != null) { cluster.shutdown(); } } @Before public void createFiles() throws IllegalArgumentException, IOException { hdfs.delete(new Path(testdir), true); hdfs.mkdirs(new Path(testdir)); DFSTestUtil.createFile(hdfs, new Path(testdir + "/f1"), 0, (short) 1, 0); } @Test(timeout = 60000) public void testClientAccessPrivilegeForRemove() throws Exception { // Configure ro access for nfs1 service config.set("dfs.nfs.exports.allowed.hosts", "* ro"); // Start nfs Nfs3 nfs = new Nfs3(config); nfs.startServiceInternal(false); RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs.getRpcProgram(); // Create a remove request 
HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); handle.serialize(xdr_req); xdr_req.writeString("f1"); // Remove operation REMOVE3Response response = nfsd.remove(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); // Assert on return code assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response.getStatus()); } }
4,292
34.479339
77
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import org.junit.Test; import java.io.IOException; import org.apache.hadoop.nfs.NfsFileType; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.mockito.Mockito; public class TestNfs3Utils { @Test public void testGetAccessRightsForUserGroup() throws IOException { Nfs3FileAttributes attr = Mockito.mock(Nfs3FileAttributes.class); Mockito.when(attr.getUid()).thenReturn(2); Mockito.when(attr.getGid()).thenReturn(3); Mockito.when(attr.getMode()).thenReturn(448); // 700 Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue()); assertEquals("No access should be allowed as UID does not match attribute over mode 700", 0, Nfs3Utils.getAccessRightsForUserGroup(3, 3, null, attr)); Mockito.when(attr.getUid()).thenReturn(2); Mockito.when(attr.getGid()).thenReturn(3); Mockito.when(attr.getMode()).thenReturn(56); // 070 Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue()); assertEquals("No access should be allowed as GID does not match attribute over mode 070", 0, Nfs3Utils.getAccessRightsForUserGroup(2, 4, null, attr)); Mockito.when(attr.getUid()).thenReturn(2); Mockito.when(attr.getGid()).thenReturn(3); Mockito.when(attr.getMode()).thenReturn(7); // 007 Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue()); assertEquals("Access should be allowed as mode is 007 and UID/GID do not match", 61 /* RWX */, Nfs3Utils.getAccessRightsForUserGroup(1, 4, new int[] {5, 6}, attr)); Mockito.when(attr.getUid()).thenReturn(2); Mockito.when(attr.getGid()).thenReturn(10); Mockito.when(attr.getMode()).thenReturn(288); // 440 Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue()); assertEquals("Access should be allowed as mode is 440 and Aux GID does match", 1 /* R */, Nfs3Utils.getAccessRightsForUserGroup(3, 4, new int[] {5, 16, 10}, attr)); Mockito.when(attr.getUid()).thenReturn(2); Mockito.when(attr.getGid()).thenReturn(10); Mockito.when(attr.getMode()).thenReturn(448); // 700 Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue()); assertEquals("Access should be allowed for dir as mode is 700 and UID does match", 31 /* Lookup */, Nfs3Utils.getAccessRightsForUserGroup(2, 4, new int[] {5, 16, 10}, attr)); assertEquals("No access should be allowed for dir as mode is 700 even though GID does match", 0, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 4}, attr)); assertEquals("No access should be allowed for dir as mode is 700 even though AuxGID does match", 0, Nfs3Utils.getAccessRightsForUserGroup(3, 20, new int[] {5, 10}, attr)); Mockito.when(attr.getUid()).thenReturn(2); 
Mockito.when(attr.getGid()).thenReturn(10); Mockito.when(attr.getMode()).thenReturn(457); // 711 Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue()); assertEquals("Access should be allowed for dir as mode is 711 and GID matches", 2 /* Lookup */, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 11}, attr)); } }
4,102
50.2875
100
java
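The numeric literals asserted in TestNfs3Utils above (0, 61, 31, 1, 2) are NFSv3 ACCESS bitmasks. The stand-alone sketch below decodes them using the bit values from RFC 1813; the constants are declared locally for illustration and are deliberately not taken from any Hadoop class.

/**
 * Small, self-contained decoder for the ACCESS masks asserted above.
 * Bit values follow RFC 1813; they are defined locally, not imported.
 */
public final class AccessMaskDecoder {
  private static final int READ = 0x0001;
  private static final int LOOKUP = 0x0002;
  private static final int MODIFY = 0x0004;
  private static final int EXTEND = 0x0008;
  private static final int DELETE = 0x0010;
  private static final int EXECUTE = 0x0020;

  static String decode(int mask) {
    StringBuilder sb = new StringBuilder();
    if ((mask & READ) != 0) sb.append("READ ");
    if ((mask & LOOKUP) != 0) sb.append("LOOKUP ");
    if ((mask & MODIFY) != 0) sb.append("MODIFY ");
    if ((mask & EXTEND) != 0) sb.append("EXTEND ");
    if ((mask & DELETE) != 0) sb.append("DELETE ");
    if ((mask & EXECUTE) != 0) sb.append("EXECUTE ");
    return sb.toString().trim();
  }

  public static void main(String[] args) {
    // 61 -> READ MODIFY EXTEND DELETE EXECUTE, 31 -> READ LOOKUP MODIFY EXTEND DELETE,
    // 1 -> READ, 2 -> LOOKUP: the values asserted in the test above.
    for (int mask : new int[] {61, 31, 1, 2}) {
      System.out.println(mask + " -> " + decode(mask));
    }
  }
}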
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.concurrent.ConcurrentNavigableMap; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS; import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.request.CREATE3Request; import org.apache.hadoop.nfs.nfs3.request.READ3Request; import org.apache.hadoop.nfs.nfs3.request.SetAttr3; import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; import org.apache.hadoop.nfs.nfs3.response.CREATE3Response; import org.apache.hadoop.nfs.nfs3.response.READ3Response; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.SecurityHandler; import org.apache.hadoop.security.ShellBasedIdMapping; import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; import org.apache.hadoop.security.authorize.ProxyUsers; import org.jboss.netty.channel.Channel; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; public class TestWrites { @Test public void testAlterWriteRequest() throws IOException { int len = 20; byte[] data = new byte[len]; ByteBuffer buffer = ByteBuffer.wrap(data); for (int i = 0; i < len; i++) { buffer.put((byte) i); } buffer.flip(); int originalCount = buffer.array().length; WRITE3Request request = new WRITE3Request(new FileHandle(), 0, data.length, WriteStableHow.UNSTABLE, buffer); WriteCtx writeCtx1 = new WriteCtx(request.getHandle(), request.getOffset(), request.getCount(), WriteCtx.INVALID_ORIGINAL_COUNT, request.getStableHow(), request.getData(), null, 1, false, WriteCtx.DataState.NO_DUMP); Assert.assertTrue(writeCtx1.getData().array().length == originalCount); // Now change the write request OpenFileCtx.alterWriteRequest(request, 12); WriteCtx writeCtx2 = new WriteCtx(request.getHandle(), request.getOffset(), request.getCount(), originalCount, request.getStableHow(), request.getData(), null, 2, false, 
WriteCtx.DataState.NO_DUMP); ByteBuffer appendedData = writeCtx2.getData(); int position = appendedData.position(); int limit = appendedData.limit(); Assert.assertTrue(position == 12); Assert.assertTrue(limit - position == 8); Assert.assertTrue(appendedData.get(position) == (byte) 12); Assert.assertTrue(appendedData.get(position + 1) == (byte) 13); Assert.assertTrue(appendedData.get(position + 2) == (byte) 14); Assert.assertTrue(appendedData.get(position + 7) == (byte) 19); // Test current file write offset is at boundaries buffer.position(0); request = new WRITE3Request(new FileHandle(), 0, data.length, WriteStableHow.UNSTABLE, buffer); OpenFileCtx.alterWriteRequest(request, 1); WriteCtx writeCtx3 = new WriteCtx(request.getHandle(), request.getOffset(), request.getCount(), originalCount, request.getStableHow(), request.getData(), null, 2, false, WriteCtx.DataState.NO_DUMP); appendedData = writeCtx3.getData(); position = appendedData.position(); limit = appendedData.limit(); Assert.assertTrue(position == 1); Assert.assertTrue(limit - position == 19); Assert.assertTrue(appendedData.get(position) == (byte) 1); Assert.assertTrue(appendedData.get(position + 18) == (byte) 19); // Reset buffer position before test another boundary buffer.position(0); request = new WRITE3Request(new FileHandle(), 0, data.length, WriteStableHow.UNSTABLE, buffer); OpenFileCtx.alterWriteRequest(request, 19); WriteCtx writeCtx4 = new WriteCtx(request.getHandle(), request.getOffset(), request.getCount(), originalCount, request.getStableHow(), request.getData(), null, 2, false, WriteCtx.DataState.NO_DUMP); appendedData = writeCtx4.getData(); position = appendedData.position(); limit = appendedData.limit(); Assert.assertTrue(position == 19); Assert.assertTrue(limit - position == 1); Assert.assertTrue(appendedData.get(position) == (byte) 19); } @Test // Validate all the commit check return codes OpenFileCtx.COMMIT_STATUS, which // includes COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX, // COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC. 
public void testCheckCommit() throws IOException { DFSClient dfsClient = Mockito.mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); Mockito.when(fos.getPos()).thenReturn((long) 0); NfsConfiguration conf = new NfsConfiguration(); conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false); OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(conf), false, conf); COMMIT_STATUS ret; // Test inactive open file context ctx.setActiveStatusForTest(false); Channel ch = Mockito.mock(Channel.class); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX); ctx.getPendingWritesForTest().put(new OffsetRange(5, 10), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE); // Test request with non zero commit offset ctx.setActiveStatusForTest(true); Mockito.when(fos.getPos()).thenReturn((long) 10); ctx.setNextOffsetForTest(10); COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false); Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); // Do_SYNC state will be updated to FINISHED after data sync ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); status = ctx.checkCommitInternal(10, ch, 1, attr, false); Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); ConcurrentNavigableMap<Long, CommitCtx> commits = ctx .getPendingCommitsForTest(); Assert.assertTrue(commits.size() == 0); ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT); Assert.assertTrue(commits.size() == 1); long key = commits.firstKey(); Assert.assertTrue(key == 11); // Test request with zero commit offset commits.remove(new Long(11)); // There is one pending write [5,10] ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT); Assert.assertTrue(commits.size() == 1); key = commits.firstKey(); Assert.assertTrue(key == 9); // Empty pending writes ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10)); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); } @Test // Validate all the commit check return codes OpenFileCtx.COMMIT_STATUS with // large file upload option. 
public void testCheckCommitLargeFileUpload() throws IOException { DFSClient dfsClient = Mockito.mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); Mockito.when(fos.getPos()).thenReturn((long) 0); NfsConfiguration conf = new NfsConfiguration(); conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, true); OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(conf), false, conf); COMMIT_STATUS ret; // Test inactive open file context ctx.setActiveStatusForTest(false); Channel ch = Mockito.mock(Channel.class); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX); ctx.getPendingWritesForTest().put(new OffsetRange(10, 15), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE); // Test request with non zero commit offset ctx.setActiveStatusForTest(true); Mockito.when(fos.getPos()).thenReturn((long) 8); ctx.setNextOffsetForTest(10); COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false); Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); // Do_SYNC state will be updated to FINISHED after data sync ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); // Test commit sequential writes status = ctx.checkCommitInternal(10, ch, 1, attr, false); Assert.assertTrue(status == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); // Test commit non-sequential writes ConcurrentNavigableMap<Long, CommitCtx> commits = ctx .getPendingCommitsForTest(); Assert.assertTrue(commits.size() == 1); ret = ctx.checkCommit(dfsClient, 16, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS); Assert.assertTrue(commits.size() == 1); // Test request with zero commit offset commits.remove(new Long(10)); // There is one pending write [10,15] ret = ctx.checkCommitInternal(0, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); ret = ctx.checkCommitInternal(9, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); Assert.assertTrue(commits.size() == 2); // Empty pending writes. nextOffset=10, flushed pos=8 ctx.getPendingWritesForTest().remove(new OffsetRange(10, 15)); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); // Empty pending writes ctx.setNextOffsetForTest((long) 8); // flushed pos = 8 ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); } @Test public void testCheckCommitAixCompatMode() throws IOException { DFSClient dfsClient = Mockito.mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); NfsConfiguration conf = new NfsConfiguration(); conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false); // Enable AIX compatibility mode. OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()), true, conf); // Test fall-through to pendingWrites check in the event that commitOffset // is greater than the number of bytes we've so far flushed. 
Mockito.when(fos.getPos()).thenReturn((long) 2); COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false); Assert.assertTrue(status == COMMIT_STATUS.COMMIT_FINISHED); // Test the case when we actually have received more bytes than we're trying // to commit. ctx.getPendingWritesForTest().put(new OffsetRange(0, 10), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); Mockito.when(fos.getPos()).thenReturn((long) 10); ctx.setNextOffsetForTest((long)10); status = ctx.checkCommitInternal(5, null, 1, attr, false); Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); } @Test // Validate all the commit check return codes OpenFileCtx.COMMIT_STATUS, which // includes COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX, // COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC. public void testCheckCommitFromRead() throws IOException { DFSClient dfsClient = Mockito.mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); Mockito.when(fos.getPos()).thenReturn((long) 0); NfsConfiguration config = new NfsConfiguration(); config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false); OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(config), false, config); FileHandle h = new FileHandle(1); // fake handle for "/dumpFilePath" COMMIT_STATUS ret; WriteManager wm = new WriteManager(new ShellBasedIdMapping(config), config, false); assertTrue(wm.addOpenFileStream(h, ctx)); // Test inactive open file context ctx.setActiveStatusForTest(false); Channel ch = Mockito.mock(Channel.class); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true); assertEquals( COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret); assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0)); ctx.getPendingWritesForTest().put(new OffsetRange(10, 15), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true); assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE, ret); assertEquals(Nfs3Status.NFS3ERR_IO, wm.commitBeforeRead(dfsClient, h, 0)); // Test request with non zero commit offset ctx.setActiveStatusForTest(true); Mockito.when(fos.getPos()).thenReturn((long) 10); ctx.setNextOffsetForTest((long)10); COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false); assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status); // Do_SYNC state will be updated to FINISHED after data sync ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, true); assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret); assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 5)); status = ctx.checkCommitInternal(10, ch, 1, attr, true); assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, true); assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret); assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 10)); ConcurrentNavigableMap<Long, CommitCtx> commits = ctx .getPendingCommitsForTest(); assertTrue(commits.size() == 0); ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, true); assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret); assertEquals(0, commits.size()); // commit triggered by read doesn't wait assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 11)); // Test request with zero commit offset // There is one pending write [5,10] ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true); assertEquals(COMMIT_STATUS.COMMIT_WAIT, ret); assertEquals(0, 
commits.size()); assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0)); // Empty pending writes ctx.getPendingWritesForTest().remove(new OffsetRange(10, 15)); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true); assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret); assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0)); } @Test // Validate all the commit check return codes OpenFileCtx.COMMIT_STATUS with large file upload option public void testCheckCommitFromReadLargeFileUpload() throws IOException { DFSClient dfsClient = Mockito.mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); Mockito.when(fos.getPos()).thenReturn((long) 0); NfsConfiguration config = new NfsConfiguration(); config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, true); OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(config), false, config); FileHandle h = new FileHandle(1); // fake handle for "/dumpFilePath" COMMIT_STATUS ret; WriteManager wm = new WriteManager(new ShellBasedIdMapping(config), config, false); assertTrue(wm.addOpenFileStream(h, ctx)); // Test inactive open file context ctx.setActiveStatusForTest(false); Channel ch = Mockito.mock(Channel.class); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true); assertEquals( COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret); assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0)); ctx.getPendingWritesForTest().put(new OffsetRange(10, 15), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true); assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE, ret); assertEquals(Nfs3Status.NFS3ERR_IO, wm.commitBeforeRead(dfsClient, h, 0)); // Test request with non zero commit offset ctx.setActiveStatusForTest(true); Mockito.when(fos.getPos()).thenReturn((long) 6); ctx.setNextOffsetForTest((long)10); COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false); assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status); // Do_SYNC state will be updated to FINISHED after data sync ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, true); assertEquals(COMMIT_STATUS.COMMIT_FINISHED, ret); assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 5)); // Test request with sequential writes status = ctx.checkCommitInternal(9, ch, 1, attr, true); assertTrue(status == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); ret = ctx.checkCommit(dfsClient, 9, ch, 1, attr, true); assertEquals(COMMIT_STATUS.COMMIT_SPECIAL_WAIT, ret); assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 9)); // Test request with non-sequential writes ConcurrentNavigableMap<Long, CommitCtx> commits = ctx .getPendingCommitsForTest(); assertTrue(commits.size() == 0); ret = ctx.checkCommit(dfsClient, 16, ch, 1, attr, true); assertEquals(COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS, ret); assertEquals(0, commits.size()); // commit triggered by read doesn't wait assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 16)); // Test request with zero commit offset // There is one pending write [10,15] ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true); assertEquals(COMMIT_STATUS.COMMIT_SPECIAL_WAIT, ret); assertEquals(0, commits.size()); assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0)); // Empty pending writes ctx.getPendingWritesForTest().remove(new OffsetRange(10, 15)); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, 
true); assertEquals(COMMIT_STATUS.COMMIT_SPECIAL_WAIT, ret); assertEquals(Nfs3Status.NFS3ERR_JUKEBOX, wm.commitBeforeRead(dfsClient, h, 0)); } private void waitWrite(RpcProgramNfs3 nfsd, FileHandle handle, int maxWaitTime) throws InterruptedException { int waitedTime = 0; OpenFileCtx ctx = nfsd.getWriteManager() .getOpenFileCtxCache().get(handle); assertTrue(ctx != null); do { Thread.sleep(3000); waitedTime += 3000; if (ctx.getPendingWritesForTest().size() == 0) { return; } } while (waitedTime < maxWaitTime); fail("Write can't finish."); } @Test public void testWriteStableHow() throws IOException, InterruptedException { NfsConfiguration config = new NfsConfiguration(); DFSClient client = null; MiniDFSCluster cluster = null; RpcProgramNfs3 nfsd; SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class); Mockito.when(securityHandler.getUser()).thenReturn( System.getProperty("user.name")); String currentUser = System.getProperty("user.name"); config.set( DefaultImpersonationProvider.getTestProvider(). getProxySuperuserGroupConfKey(currentUser), "*"); config.set( DefaultImpersonationProvider.getTestProvider(). getProxySuperuserIpConfKey(currentUser), "*"); ProxyUsers.refreshSuperUserGroupsConfiguration(config); try { cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build(); cluster.waitActive(); client = new DFSClient(NameNode.getAddress(config), config); // Use emphral port in case tests are running in parallel config.setInt("nfs3.mountd.port", 0); config.setInt("nfs3.server.port", 0); // Start nfs Nfs3 nfs3 = new Nfs3(config); nfs3.startServiceInternal(false); nfsd = (RpcProgramNfs3) nfs3.getRpcProgram(); HdfsFileStatus status = client.getFileInfo("/"); FileHandle rootHandle = new FileHandle(status.getFileId()); // Create file1 CREATE3Request createReq = new CREATE3Request(rootHandle, "file1", Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0); XDR createXdr = new XDR(); createReq.serialize(createXdr); CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); FileHandle handle = createRsp.getObjHandle(); // Test DATA_SYNC byte[] buffer = new byte[10]; for (int i = 0; i < 10; i++) { buffer[i] = (byte) i; } WRITE3Request writeReq = new WRITE3Request(handle, 0, 10, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer)); XDR writeXdr = new XDR(); writeReq.serialize(writeXdr); nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234)); waitWrite(nfsd, handle, 60000); // Readback READ3Request readReq = new READ3Request(handle, 0, 10); XDR readXdr = new XDR(); readReq.serialize(readXdr); READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertTrue(Arrays.equals(buffer, readRsp.getData().array())); // Test FILE_SYNC // Create file2 CREATE3Request createReq2 = new CREATE3Request(rootHandle, "file2", Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0); XDR createXdr2 = new XDR(); createReq2.serialize(createXdr2); CREATE3Response createRsp2 = nfsd.create(createXdr2.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); FileHandle handle2 = createRsp2.getObjHandle(); WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10, WriteStableHow.FILE_SYNC, ByteBuffer.wrap(buffer)); XDR writeXdr2 = new XDR(); writeReq2.serialize(writeXdr2); nfsd.write(writeXdr2.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234)); waitWrite(nfsd, handle2, 60000); // 
Readback READ3Request readReq2 = new READ3Request(handle2, 0, 10); XDR readXdr2 = new XDR(); readReq2.serialize(readXdr2); READ3Response readRsp2 = nfsd.read(readXdr2.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); assertTrue(Arrays.equals(buffer, readRsp2.getData().array())); // FILE_SYNC should sync the file size status = client.getFileInfo("/file2"); assertTrue(status.getLen() == 10); } finally { if (cluster != null) { cluster.shutdown(); } } } @Test public void testOOOWrites() throws IOException, InterruptedException { NfsConfiguration config = new NfsConfiguration(); MiniDFSCluster cluster = null; RpcProgramNfs3 nfsd; final int bufSize = 32; final int numOOO = 3; SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class); Mockito.when(securityHandler.getUser()).thenReturn( System.getProperty("user.name")); String currentUser = System.getProperty("user.name"); config.set( DefaultImpersonationProvider.getTestProvider(). getProxySuperuserGroupConfKey(currentUser), "*"); config.set( DefaultImpersonationProvider.getTestProvider(). getProxySuperuserIpConfKey(currentUser), "*"); ProxyUsers.refreshSuperUserGroupsConfiguration(config); // Use emphral port in case tests are running in parallel config.setInt("nfs3.mountd.port", 0); config.setInt("nfs3.server.port", 0); try { cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build(); cluster.waitActive(); Nfs3 nfs3 = new Nfs3(config); nfs3.startServiceInternal(false); nfsd = (RpcProgramNfs3) nfs3.getRpcProgram(); DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config); HdfsFileStatus status = dfsClient.getFileInfo("/"); FileHandle rootHandle = new FileHandle(status.getFileId()); CREATE3Request createReq = new CREATE3Request(rootHandle, "out-of-order-write" + System.currentTimeMillis(), Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0); XDR createXdr = new XDR(); createReq.serialize(createXdr); CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); FileHandle handle = createRsp.getObjHandle(); byte[][] oooBuf = new byte[numOOO][bufSize]; for (int i = 0; i < numOOO; i++) { Arrays.fill(oooBuf[i], (byte) i); } for (int i = 0; i < numOOO; i++) { final long offset = (numOOO - 1 - i) * bufSize; WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize, WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i])); XDR writeXdr = new XDR(); writeReq.serialize(writeXdr); nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234)); } waitWrite(nfsd, handle, 60000); READ3Request readReq = new READ3Request(handle, bufSize, bufSize); XDR readXdr = new XDR(); readReq.serialize(readXdr); READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", config.getInt( NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT))); assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array())); } finally { if (cluster != null) { cluster.shutdown(); } } } @Test public void testCheckSequential() throws IOException { DFSClient dfsClient = Mockito.mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); Mockito.when(fos.getPos()).thenReturn((long) 0); NfsConfiguration config = new NfsConfiguration(); config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false); OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, 
new ShellBasedIdMapping(config), false, config); ctx.getPendingWritesForTest().put(new OffsetRange(5, 10), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); ctx.getPendingWritesForTest().put(new OffsetRange(10, 15), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); ctx.getPendingWritesForTest().put(new OffsetRange(20, 25), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); assertTrue(!ctx.checkSequential(5, 4)); assertTrue(ctx.checkSequential(9, 5)); assertTrue(ctx.checkSequential(10, 5)); assertTrue(ctx.checkSequential(14, 5)); assertTrue(!ctx.checkSequential(15, 5)); assertTrue(!ctx.checkSequential(20, 5)); assertTrue(!ctx.checkSequential(25, 5)); assertTrue(!ctx.checkSequential(999, 5)); } }
29,530
42.94494
103
java
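testAlterWriteRequest in TestWrites above checks that a partially persisted UNSTABLE write is trimmed so the remaining bytes start at the current file offset. The sketch below reproduces only that position/limit arithmetic on a plain ByteBuffer; it is not OpenFileCtx.alterWriteRequest itself, and the helper name is made up for illustration.

import java.nio.ByteBuffer;

/**
 * Plain-ByteBuffer sketch of the trimming behaviour the test asserts:
 * skip the bytes that precede the current file offset and keep the rest.
 */
public class TrimWriteSketch {
  static ByteBuffer trimToOffset(ByteBuffer data, long requestOffset,
      long currentFileOffset) {
    int skip = (int) (currentFileOffset - requestOffset);
    ByteBuffer trimmed = data.duplicate();
    trimmed.position(trimmed.position() + skip);
    return trimmed;
  }

  public static void main(String[] args) {
    byte[] payload = new byte[20];
    for (int i = 0; i < payload.length; i++) {
      payload[i] = (byte) i;
    }
    // A write request for bytes [0, 20) while the file is already at offset 12:
    ByteBuffer remaining = trimToOffset(ByteBuffer.wrap(payload), 0, 12);
    System.out.println("position=" + remaining.position()    // 12
        + " remaining=" + remaining.remaining()               // 8
        + " first=" + remaining.get(remaining.position()));   // 12
  }
}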
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import java.io.IOException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.security.UserGroupInformation; import org.junit.Test; public class TestDFSClientCache { @Test public void testEviction() throws IOException { NfsConfiguration conf = new NfsConfiguration(); conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost"); // Only one entry will be in the cache final int MAX_CACHE_SIZE = 1; DFSClientCache cache = new DFSClientCache(conf, MAX_CACHE_SIZE); DFSClient c1 = cache.getDfsClient("test1"); assertTrue(cache.getDfsClient("test1").toString().contains("ugi=test1")); assertEquals(c1, cache.getDfsClient("test1")); assertFalse(isDfsClientClose(c1)); cache.getDfsClient("test2"); assertTrue(isDfsClientClose(c1)); assertTrue("cache size should be the max size or less", cache.clientCache.size() <= MAX_CACHE_SIZE); } @Test public void testGetUserGroupInformationSecure() throws IOException { String userName = "user1"; String currentUser = "test-user"; NfsConfiguration conf = new NfsConfiguration(); UserGroupInformation currentUserUgi = UserGroupInformation.createRemoteUser(currentUser); currentUserUgi.setAuthenticationMethod(KERBEROS); UserGroupInformation.setLoginUser(currentUserUgi); DFSClientCache cache = new DFSClientCache(conf); UserGroupInformation ugiResult = cache.getUserGroupInformation(userName, currentUserUgi); assertThat(ugiResult.getUserName(), is(userName)); assertThat(ugiResult.getRealUser(), is(currentUserUgi)); assertThat( ugiResult.getAuthenticationMethod(), is(UserGroupInformation.AuthenticationMethod.PROXY)); } @Test public void testGetUserGroupInformation() throws IOException { String userName = "user1"; String currentUser = "currentUser"; UserGroupInformation currentUserUgi = UserGroupInformation .createUserForTesting(currentUser, new String[0]); NfsConfiguration conf = new NfsConfiguration(); conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost"); DFSClientCache cache = new DFSClientCache(conf); UserGroupInformation ugiResult = cache.getUserGroupInformation(userName, currentUserUgi); assertThat(ugiResult.getUserName(), is(userName)); assertThat(ugiResult.getRealUser(), is(currentUserUgi)); assertThat( ugiResult.getAuthenticationMethod(), is(UserGroupInformation.AuthenticationMethod.PROXY)); } private static boolean 
isDfsClientClose(DFSClient c) { try { c.exists(""); } catch (IOException e) { return e.getMessage().equals("Filesystem closed"); } return false; } }
4,004
35.743119
92
java
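testEviction in TestDFSClientCache above relies on two behaviours: the client cache is bounded by MAX_CACHE_SIZE, and an evicted DFSClient gets closed. The sketch below shows one generic way to obtain that behaviour with a size-bounded Guava LoadingCache whose removal listener closes evicted entries. It uses a fake client instead of DFSClient and does not claim to be DFSClientCache's actual implementation; Guava is assumed to be on the classpath.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalListener;

/** Generic eviction-closes-client sketch; not DFSClientCache itself. */
public class EvictingClientCacheSketch {
  static class FakeClient {
    final String user;
    volatile boolean closed;
    FakeClient(String user) { this.user = user; }
    void close() { closed = true; }
  }

  public static void main(String[] args) {
    // Close whatever the cache evicts, mirroring what the test observes
    // via isDfsClientClose().
    RemovalListener<String, FakeClient> closeOnEvict =
        notification -> notification.getValue().close();

    LoadingCache<String, FakeClient> cache = CacheBuilder.newBuilder()
        .maximumSize(1)                     // mirrors MAX_CACHE_SIZE = 1 in the test
        .removalListener(closeOnEvict)
        .build(CacheLoader.from(FakeClient::new));

    FakeClient c1 = cache.getUnchecked("test1");
    cache.getUnchecked("test2");            // exceeds the bound, evicting the older entry
    cache.cleanUp();                        // force pending removal notifications to run
    System.out.println("c1 closed after eviction: " + c1.closed);
  }
}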
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import static org.junit.Assert.assertTrue; import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.SocketAddress; import java.util.List; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3; import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.response.READDIR3Response; import org.apache.hadoop.nfs.nfs3.response.READDIR3Response.Entry3; import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response; import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response.EntryPlus3; import org.apache.hadoop.oncrpc.RpcInfo; import org.apache.hadoop.oncrpc.RpcMessage; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.SecurityHandler; import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; import org.apache.hadoop.security.authorize.ProxyUsers; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelHandlerContext; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.Mockito; /** * Test READDIR and READDIRPLUS request with zero, nonzero cookies */ public class TestReaddir { static NfsConfiguration config = new NfsConfiguration(); static MiniDFSCluster cluster = null; static DistributedFileSystem hdfs; static NameNode nn; static RpcProgramNfs3 nfsd; static String testdir = "/tmp"; static SecurityHandler securityHandler; @BeforeClass public static void setup() throws Exception { String currentUser = System.getProperty("user.name"); config.set( DefaultImpersonationProvider.getTestProvider(). getProxySuperuserGroupConfKey(currentUser), "*"); config.set( DefaultImpersonationProvider.getTestProvider(). 
getProxySuperuserIpConfKey(currentUser), "*"); ProxyUsers.refreshSuperUserGroupsConfiguration(config); cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build(); cluster.waitActive(); hdfs = cluster.getFileSystem(); nn = cluster.getNameNode(); // Use ephemeral port in case tests are running in parallel config.setInt("nfs3.mountd.port", 0); config.setInt("nfs3.server.port", 0); // Start nfs Nfs3 nfs3 = new Nfs3(config); nfs3.startServiceInternal(false); nfsd = (RpcProgramNfs3) nfs3.getRpcProgram(); securityHandler = Mockito.mock(SecurityHandler.class); Mockito.when(securityHandler.getUser()).thenReturn( System.getProperty("user.name")); } @AfterClass public static void shutdown() throws Exception { if (cluster != null) { cluster.shutdown(); } } @Before public void createFiles() throws IllegalArgumentException, IOException { hdfs.delete(new Path(testdir), true); hdfs.mkdirs(new Path(testdir)); DFSTestUtil.createFile(hdfs, new Path(testdir + "/f1"), 0, (short) 1, 0); DFSTestUtil.createFile(hdfs, new Path(testdir + "/f2"), 0, (short) 1, 0); DFSTestUtil.createFile(hdfs, new Path(testdir + "/f3"), 0, (short) 1, 0); } @Test public void testReaddirBasic() throws IOException { // Get inodeId of /tmp HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); // Create related part of the XDR request XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); handle.serialize(xdr_req); xdr_req.writeLongAsHyper(0); // cookie xdr_req.writeLongAsHyper(0); // verifier xdr_req.writeInt(100); // count READDIR3Response response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); List<Entry3> dirents = response.getDirList().getEntries(); assertTrue(dirents.size() == 5); // including dot, dotdot // Test start listing from f2 status = nn.getRpcServer().getFileInfo(testdir + "/f2"); long f2Id = status.getFileId(); // Create related part of the XDR request xdr_req = new XDR(); handle = new FileHandle(dirId); handle.serialize(xdr_req); xdr_req.writeLongAsHyper(f2Id); // cookie xdr_req.writeLongAsHyper(0); // verifier xdr_req.writeInt(100); // count response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); dirents = response.getDirList().getEntries(); assertTrue(dirents.size() == 1); Entry3 entry = dirents.get(0); assertTrue(entry.getName().equals("f3")); // When the cookie entry is deleted, the listing starts over, not including dot, dotdot hdfs.delete(new Path(testdir + "/f2"), false); response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); dirents = response.getDirList().getEntries(); assertTrue(dirents.size() == 2); // No dot, dotdot } @Test // Test readdirplus public void testReaddirPlus() throws IOException { // Get inodeId of /tmp HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir); long dirId = status.getFileId(); // Create related part of the XDR request XDR xdr_req = new XDR(); FileHandle handle = new FileHandle(dirId); handle.serialize(xdr_req); xdr_req.writeLongAsHyper(0); // cookie xdr_req.writeLongAsHyper(0); // verifier xdr_req.writeInt(100); // dirCount xdr_req.writeInt(1000); // maxCount READDIRPLUS3Response responsePlus = nfsd.readdirplus(xdr_req .asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); List<EntryPlus3> direntPlus = responsePlus.getDirListPlus().getEntries(); assertTrue(direntPlus.size() == 5); // including dot, dotdot // Test start listing from f2 status =
nn.getRpcServer().getFileInfo(testdir + "/f2"); long f2Id = status.getFileId(); // Create related part of the XDR request xdr_req = new XDR(); handle = new FileHandle(dirId); handle.serialize(xdr_req); xdr_req.writeLongAsHyper(f2Id); // cookie xdr_req.writeLongAsHyper(0); // verifier xdr_req.writeInt(100); // dirCount xdr_req.writeInt(1000); // maxCount responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); direntPlus = responsePlus.getDirListPlus().getEntries(); assertTrue(direntPlus.size() == 1); EntryPlus3 entryPlus = direntPlus.get(0); assertTrue(entryPlus.getName().equals("f3")); // When the cookie is deleted, list starts over no including dot, dotdot hdfs.delete(new Path(testdir + "/f2"), false); responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234)); direntPlus = responsePlus.getDirListPlus().getEntries(); assertTrue(direntPlus.size() == 2); // No dot, dotdot } }
8,160
36.95814
78
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOffsetRange.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertEquals; import java.io.IOException; import org.junit.Test; public class TestOffsetRange { @Test(expected = IllegalArgumentException.class) public void testConstructor1() throws IOException { new OffsetRange(0, 0); } @Test(expected = IllegalArgumentException.class) public void testConstructor2() throws IOException { new OffsetRange(-1, 0); } @Test(expected = IllegalArgumentException.class) public void testConstructor3() throws IOException { new OffsetRange(-3, -1); } @Test(expected = IllegalArgumentException.class) public void testConstructor4() throws IOException { new OffsetRange(-3, 100); } @Test public void testCompare() throws IOException { OffsetRange r1 = new OffsetRange(0, 1); OffsetRange r2 = new OffsetRange(1, 3); OffsetRange r3 = new OffsetRange(1, 3); OffsetRange r4 = new OffsetRange(3, 4); assertEquals(0, OffsetRange.ReverseComparatorOnMin.compare(r2, r3)); assertEquals(0, OffsetRange.ReverseComparatorOnMin.compare(r2, r2)); assertTrue(OffsetRange.ReverseComparatorOnMin.compare(r2, r1) < 0); assertTrue(OffsetRange.ReverseComparatorOnMin.compare(r2, r4) > 0); } }
2,106
33.540984
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import static org.junit.Assert.assertTrue; import java.io.File; import java.net.URL; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; public class TestNfs3HttpServer { private static final String BASEDIR = System.getProperty("test.build.dir", "target/test-dir") + "/" + TestNfs3HttpServer.class.getSimpleName(); private static NfsConfiguration conf = new NfsConfiguration(); private static MiniDFSCluster cluster; private static String keystoresDir; private static String sslConfDir; @BeforeClass public static void setUp() throws Exception { conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTP_AND_HTTPS.name()); conf.set(NfsConfigKeys.NFS_HTTP_ADDRESS_KEY, "localhost:0"); conf.set(NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY, "localhost:0"); // Use ephemeral port in case tests are running in parallel conf.setInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, 0); conf.setInt(NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY, 0); File base = new File(BASEDIR); FileUtil.fullyDelete(base); base.mkdirs(); keystoresDir = new File(BASEDIR).getAbsolutePath(); sslConfDir = KeyStoreTestUtil.getClasspathDir(TestNfs3HttpServer.class); KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); } @AfterClass public static void tearDown() throws Exception { FileUtil.fullyDelete(new File(BASEDIR)); if (cluster != null) { cluster.shutdown(); } KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir); } @Test public void testHttpServer() throws Exception { Nfs3 nfs = new Nfs3(conf); nfs.startServiceInternal(false); RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs.getRpcProgram(); Nfs3HttpServer infoServer = nfsd.getInfoServer(); String urlRoot = infoServer.getServerURI().toString(); // Check default servlets. String pageContents = DFSTestUtil.urlGet(new URL(urlRoot + "/jmx")); assertTrue("Bad contents: " + pageContents, pageContents.contains("java.lang:type=")); System.out.println("pc:" + pageContents); int port = infoServer.getSecurePort(); assertTrue("Can't get https port", port > 0); } }
3,536
36.62766
76
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.io.IOException; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.security.ShellBasedIdMapping; import org.junit.Test; import org.mockito.Mockito; public class TestOpenFileCtxCache { static boolean cleaned = false; @Test public void testEviction() throws IOException, InterruptedException { NfsConfiguration conf = new NfsConfiguration(); // Only two entries will be in the cache conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2); DFSClient dfsClient = Mockito.mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); Mockito.when(fos.getPos()).thenReturn((long) 0); OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration())); OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration())); OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration())); OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration())); OpenFileCtx context5 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration())); OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100); boolean ret = cache.put(new FileHandle(1), context1); assertTrue(ret); Thread.sleep(1000); ret = cache.put(new FileHandle(2), context2); assertTrue(ret); ret = cache.put(new FileHandle(3), context3); assertFalse(ret); assertTrue(cache.size() == 2); // Wait for the oldest stream to be evict-able, insert again Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT); assertTrue(cache.size() == 2); ret = cache.put(new FileHandle(3), context3); assertTrue(ret); assertTrue(cache.size() == 2); assertTrue(cache.get(new FileHandle(1)) == null); // Test inactive entry is evicted immediately context3.setActiveStatusForTest(false); ret = cache.put(new FileHandle(4), context4); assertTrue(ret); // Now the cache has context2 and context4 // Test eviction failure if all entries have pending work. 
context2.getPendingWritesForTest().put(new OffsetRange(0, 100), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); context4.getPendingCommitsForTest().put(new Long(100), new CommitCtx(0, null, 0, attr)); Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT); ret = cache.put(new FileHandle(5), context5); assertFalse(ret); } @Test public void testScan() throws IOException, InterruptedException { NfsConfiguration conf = new NfsConfiguration(); // Only two entries will be in the cache conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2); DFSClient dfsClient = Mockito.mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); Mockito.when(fos.getPos()).thenReturn((long) 0); OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration())); OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration())); OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration())); OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration())); OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100); // Test cleaning expired entry boolean ret = cache.put(new FileHandle(1), context1); assertTrue(ret); ret = cache.put(new FileHandle(2), context2); assertTrue(ret); Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT + 1); cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT); assertTrue(cache.size() == 0); // Test cleaning inactive entry ret = cache.put(new FileHandle(3), context3); assertTrue(ret); ret = cache.put(new FileHandle(4), context4); assertTrue(ret); context3.setActiveStatusForTest(false); cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_DEFAULT); assertTrue(cache.size() == 1); assertTrue(cache.get(new FileHandle(3)) == null); assertTrue(cache.get(new FileHandle(4)) != null); } }
5,949
40.901408
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import static org.junit.Assert.assertTrue; import java.io.IOException; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.nfs.mount.Mountd; import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd; import org.junit.Test; public class TestExportsTable { @Test public void testExportPoint() throws IOException { NfsConfiguration config = new NfsConfiguration(); MiniDFSCluster cluster = null; String exportPoint = "/myexport1"; config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint); // Use ephemeral port in case tests are running in parallel config.setInt("nfs3.mountd.port", 0); config.setInt("nfs3.server.port", 0); try { cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build(); cluster.waitActive(); // Start nfs final Nfs3 nfsServer = new Nfs3(config); nfsServer.startServiceInternal(false); Mountd mountd = nfsServer.getMountd(); RpcProgramMountd rpcMount = (RpcProgramMountd) mountd.getRpcProgram(); assertTrue(rpcMount.getExports().size() == 1); String exportInMountd = rpcMount.getExports().get(0); assertTrue(exportInMountd.equals(exportPoint)); } finally { if (cluster != null) { cluster.shutdown(); } } } }
2,271
33.424242
76
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.conf; public class NfsConfigKeys { // The IP port number for NFS and mountd. public final static String DFS_NFS_SERVER_PORT_KEY = "nfs.server.port"; public final static int DFS_NFS_SERVER_PORT_DEFAULT = 2049; public final static String DFS_NFS_MOUNTD_PORT_KEY = "nfs.mountd.port"; public final static int DFS_NFS_MOUNTD_PORT_DEFAULT = 4242; public static final String DFS_NFS_FILE_DUMP_KEY = "nfs.file.dump"; public static final boolean DFS_NFS_FILE_DUMP_DEFAULT = true; public static final String DFS_NFS_FILE_DUMP_DIR_KEY = "nfs.dump.dir"; public static final String DFS_NFS_FILE_DUMP_DIR_DEFAULT = "/tmp/.hdfs-nfs"; public static final String DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY = "nfs.rtmax"; public static final int DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT = 1024 * 1024; public static final String DFS_NFS_MAX_WRITE_TRANSFER_SIZE_KEY = "nfs.wtmax"; public static final int DFS_NFS_MAX_WRITE_TRANSFER_SIZE_DEFAULT = 1024 * 1024; public static final String DFS_NFS_MAX_READDIR_TRANSFER_SIZE_KEY = "nfs.dtmax"; public static final int DFS_NFS_MAX_READDIR_TRANSFER_SIZE_DEFAULT = 64 * 1024; public static final String DFS_NFS_MAX_OPEN_FILES_KEY = "nfs.max.open.files"; public static final int DFS_NFS_MAX_OPEN_FILES_DEFAULT = 256; public static final String DFS_NFS_STREAM_TIMEOUT_KEY = "nfs.stream.timeout"; public static final long DFS_NFS_STREAM_TIMEOUT_DEFAULT = 10 * 60 * 1000; // 10 minutes public static final long DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT = 10 * 1000; // 10 seconds public final static String DFS_NFS_EXPORT_POINT_KEY = "nfs.export.point"; public final static String DFS_NFS_EXPORT_POINT_DEFAULT = "/"; public static final String DFS_NFS_KEYTAB_FILE_KEY = "nfs.keytab.file"; public static final String DFS_NFS_KERBEROS_PRINCIPAL_KEY = "nfs.kerberos.principal"; public static final String DFS_NFS_REGISTRATION_PORT_KEY = "nfs.registration.port"; public static final int DFS_NFS_REGISTRATION_PORT_DEFAULT = 40; // Currently unassigned. 
public static final String DFS_NFS_PORT_MONITORING_DISABLED_KEY = "nfs.port.monitoring.disabled"; public static final boolean DFS_NFS_PORT_MONITORING_DISABLED_DEFAULT = true; public static final String AIX_COMPAT_MODE_KEY = "nfs.aix.compatibility.mode.enabled"; public static final boolean AIX_COMPAT_MODE_DEFAULT = false; public final static String LARGE_FILE_UPLOAD = "nfs.large.file.upload"; public final static boolean LARGE_FILE_UPLOAD_DEFAULT = true; public static final String NFS_HTTP_PORT_KEY = "nfs.http.port"; public static final int NFS_HTTP_PORT_DEFAULT = 50079; public static final String NFS_HTTP_ADDRESS_KEY = "nfs.http.address"; public static final String NFS_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + NFS_HTTP_PORT_DEFAULT; public static final String NFS_HTTPS_PORT_KEY = "nfs.https.port"; public static final int NFS_HTTPS_PORT_DEFAULT = 50579; public static final String NFS_HTTPS_ADDRESS_KEY = "nfs.https.address"; public static final String NFS_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + NFS_HTTPS_PORT_DEFAULT; public static final String NFS_METRICS_PERCENTILES_INTERVALS_KEY = "nfs.metrics.percentiles.intervals"; /* * HDFS super-user is the user with the same identity as NameNode process * itself and the super-user can do anything in that permissions checks never * fail for the super-user. If the following property is configured, the * superuser on NFS client can access any file on HDFS. By default, the super * user is not configured in the gateway. Note that, even if the superuser is * configured, "nfs.exports.allowed.hosts" still takes effect. For example, * the superuser will not have write access to HDFS files through the gateway * if the NFS client host is not allowed to have write access in * "nfs.exports.allowed.hosts". */ public static final String NFS_SUPERUSER_KEY = "nfs.superuser"; public static final String NFS_SUPERUSER_DEFAULT = ""; }
4,805
52.4
106
java
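The nfs.superuser comment in NfsConfigKeys above explains that a configured superuser bypasses HDFS permission checks at the gateway while "nfs.exports.allowed.hosts" still applies. A minimal configuration sketch, assuming a hypothetical account name "hdfs"; only the key names come from NfsConfigKeys above.
import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; public class SuperuserConfigSketch { public static void main(String[] args) { NfsConfiguration conf = new NfsConfiguration(); // "hdfs" is a hypothetical user name; NFS_SUPERUSER_DEFAULT is the empty string, i.e. no superuser. conf.set(NfsConfigKeys.NFS_SUPERUSER_KEY, "hdfs"); // Host-level export access control still applies even when a superuser is configured. System.out.println("nfs.superuser = " + conf.get(NfsConfigKeys.NFS_SUPERUSER_KEY)); } }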
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfiguration.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.conf; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.apache.hadoop.security.IdMappingConstant; /** * Adds deprecated keys into the configuration. */ public class NfsConfiguration extends HdfsConfiguration { static { addDeprecatedKeys(); } private static void addDeprecatedKeys() { Configuration.addDeprecations(new DeprecationDelta[] { new DeprecationDelta("nfs3.server.port", NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY), new DeprecationDelta("nfs3.mountd.port", NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY), new DeprecationDelta("dfs.nfs.exports.cache.size", Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_KEY), new DeprecationDelta("dfs.nfs.exports.cache.expirytime.millis", Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY), new DeprecationDelta("hadoop.nfs.userupdate.milly", IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY), new DeprecationDelta("nfs.usergroup.update.millis", IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY), new DeprecationDelta("nfs.static.mapping.file", IdMappingConstant.STATIC_ID_MAPPING_FILE_KEY), new DeprecationDelta("dfs.nfs3.enableDump", NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY), new DeprecationDelta("dfs.nfs3.dump.dir", NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY), new DeprecationDelta("dfs.nfs3.max.open.files", NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY), new DeprecationDelta("dfs.nfs3.stream.timeout", NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY), new DeprecationDelta("dfs.nfs3.export.point", NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY), new DeprecationDelta("nfs.allow.insecure.ports", NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_KEY), new DeprecationDelta("dfs.nfs.keytab.file", NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY), new DeprecationDelta("dfs.nfs.kerberos.principal", NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY), new DeprecationDelta("dfs.nfs.rtmax", NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY), new DeprecationDelta("dfs.nfs.wtmax", NfsConfigKeys.DFS_NFS_MAX_WRITE_TRANSFER_SIZE_KEY), new DeprecationDelta("dfs.nfs.dtmax", NfsConfigKeys.DFS_NFS_MAX_READDIR_TRANSFER_SIZE_KEY) }); } }
3,335
44.69863
75
java
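NfsConfiguration above registers DeprecationDelta mappings so that legacy keys such as "nfs3.server.port" resolve to the new keys in NfsConfigKeys. A small sketch of the expected behavior, using only the classes shown above; the printed value should be 0 if the deprecation mapping is active, as the tests earlier in this section rely on.
import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; public class DeprecatedKeySketch { public static void main(String[] args) { NfsConfiguration conf = new NfsConfiguration(); // Set the deprecated key name, as the tests above do. conf.setInt("nfs3.server.port", 0); // The deprecation mapping makes the value visible under the new key. int port = conf.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT); System.out.println("nfs.server.port resolves to " + port); } }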
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.mount; import java.io.IOException; import java.net.DatagramSocket; import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collections; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.mount.MountEntry; import org.apache.hadoop.mount.MountInterface; import org.apache.hadoop.mount.MountResponse; import org.apache.hadoop.nfs.AccessPrivilege; import org.apache.hadoop.nfs.NfsExports; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.oncrpc.RpcAcceptedReply; import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcInfo; import org.apache.hadoop.oncrpc.RpcProgram; import org.apache.hadoop.oncrpc.RpcResponse; import org.apache.hadoop.oncrpc.RpcUtil; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.buffer.ChannelBuffers; import org.jboss.netty.channel.ChannelHandlerContext; import com.google.common.annotations.VisibleForTesting; /** * RPC program corresponding to mountd daemon. See {@link Mountd}. 
*/ public class RpcProgramMountd extends RpcProgram implements MountInterface { private static final Log LOG = LogFactory.getLog(RpcProgramMountd.class); public static final int PROGRAM = 100005; public static final int VERSION_1 = 1; public static final int VERSION_2 = 2; public static final int VERSION_3 = 3; private final DFSClient dfsClient; /** Synchronized list */ private final List<MountEntry> mounts; /** List that is unmodifiable */ private final List<String> exports; private final NfsExports hostsMatcher; public RpcProgramMountd(NfsConfiguration config, DatagramSocket registrationSocket, boolean allowInsecurePorts) throws IOException { // Note that RPC cache is not enabled super("mountd", "localhost", config.getInt( NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY, NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1, VERSION_3, registrationSocket, allowInsecurePorts); exports = new ArrayList<String>(); exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT)); this.hostsMatcher = NfsExports.getInstance(config); this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>()); UserGroupInformation.setConfiguration(config); SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY, NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY); this.dfsClient = new DFSClient(NameNode.getAddress(config), config); } @Override public XDR nullOp(XDR out, int xid, InetAddress client) { if (LOG.isDebugEnabled()) { LOG.debug("MOUNT NULLOP : " + " client: " + client); } return RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write( out); } @Override public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) { if (hostsMatcher == null) { return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid, null); } AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client); if (accessPrivilege == AccessPrivilege.NONE) { return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid, null); } String path = xdr.readString(); if (LOG.isDebugEnabled()) { LOG.debug("MOUNT MNT path: " + path + " client: " + client); } String host = client.getHostName(); if (LOG.isDebugEnabled()) { LOG.debug("Got host: " + host + " path: " + path); } if (!exports.contains(path)) { LOG.info("Path " + path + " is not shared."); MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null); return out; } FileHandle handle = null; try { HdfsFileStatus exFileStatus = dfsClient.getFileInfo(path); handle = new FileHandle(exFileStatus.getFileId()); } catch (IOException e) { LOG.error("Can't get handle for export:" + path, e); MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null); return out; } assert (handle != null); LOG.info("Giving handle (fileId:" + handle.getFileId() + ") to client for export " + path); mounts.add(new MountEntry(host, path)); MountResponse.writeMNTResponse(Nfs3Status.NFS3_OK, out, xid, handle.getContent()); return out; } @Override public XDR dump(XDR out, int xid, InetAddress client) { if (LOG.isDebugEnabled()) { LOG.debug("MOUNT NULLOP : " + " client: " + client); } List<MountEntry> copy = new ArrayList<MountEntry>(mounts); MountResponse.writeMountList(out, xid, copy); return out; } @Override public XDR umnt(XDR xdr, XDR out, int xid, InetAddress client) { String path = xdr.readString(); if (LOG.isDebugEnabled()) { LOG.debug("MOUNT UMNT path: " + path + " client: " + client); } String host = client.getHostName(); mounts.remove(new MountEntry(host, path)); 
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(out); return out; } @Override public XDR umntall(XDR out, int xid, InetAddress client) { if (LOG.isDebugEnabled()) { LOG.debug("MOUNT UMNTALL : " + " client: " + client); } mounts.clear(); return RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write( out); } @Override public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) { RpcCall rpcCall = (RpcCall) info.header(); final MNTPROC mntproc = MNTPROC.fromValue(rpcCall.getProcedure()); int xid = rpcCall.getXid(); byte[] data = new byte[info.data().readableBytes()]; info.data().readBytes(data); XDR xdr = new XDR(data); XDR out = new XDR(); InetAddress client = ((InetSocketAddress) info.remoteAddress()).getAddress(); if (mntproc == MNTPROC.NULL) { out = nullOp(out, xid, client); } else if (mntproc == MNTPROC.MNT) { // Only do port monitoring for MNT if (!doPortMonitoring(info.remoteAddress())) { out = MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid, null); } else { out = mnt(xdr, out, xid, client); } } else if (mntproc == MNTPROC.DUMP) { out = dump(out, xid, client); } else if (mntproc == MNTPROC.UMNT) { out = umnt(xdr, out, xid, client); } else if (mntproc == MNTPROC.UMNTALL) { umntall(out, xid, client); } else if (mntproc == MNTPROC.EXPORT) { // Currently only support one NFS export List<NfsExports> hostsMatchers = new ArrayList<NfsExports>(); if (hostsMatcher != null) { hostsMatchers.add(hostsMatcher); out = MountResponse.writeExportList(out, xid, exports, hostsMatchers); } else { // This means there are no valid exports provided. RpcAcceptedReply.getInstance(xid, RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write( out); } } else { // Invalid procedure RpcAcceptedReply.getInstance(xid, RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write( out); } ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer()); RpcResponse rsp = new RpcResponse(buf, info.remoteAddress()); RpcUtil.sendRpcResponse(ctx, rsp); } @Override protected boolean isIdempotent(RpcCall call) { // Not required, because cache is turned off return false; } @VisibleForTesting public List<String> getExports() { return this.exports; } }
9,021
35.379032
84
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.mount; import java.io.IOException; import java.net.DatagramSocket; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.mount.MountdBase; /** * Main class for starting mountd daemon. This daemon implements the NFS * mount protocol. When receiving a MOUNT request from an NFS client, it checks * the request against the list of currently exported file systems. If the * client is permitted to mount the file system, rpc.mountd obtains a file * handle for requested directory and returns it to the client. */ public class Mountd extends MountdBase { public Mountd(NfsConfiguration config, DatagramSocket registrationSocket, boolean allowInsecurePorts) throws IOException { super(new RpcProgramMountd(config, registrationSocket, allowInsecurePorts)); } public static void main(String[] args) throws IOException { NfsConfiguration config = new NfsConfiguration(); Mountd mountd = new Mountd(config, null, true); mountd.start(true); } }
1,846
39.152174
80
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import java.net.DatagramSocket; import java.net.InetSocketAddress; import org.apache.commons.daemon.Daemon; import org.apache.commons.daemon.DaemonContext; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; /** * This class is used to allow the initial registration of the NFS gateway with * the system portmap daemon to come from a privileged (&lt; 1024) port. This is * necessary on certain operating systems to work around this bug in rpcbind: * * Red Hat: https://bugzilla.redhat.com/show_bug.cgi?id=731542 * SLES: https://bugzilla.novell.com/show_bug.cgi?id=823364 * Debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=594880 */ public class PrivilegedNfsGatewayStarter implements Daemon { private String[] args = null; private DatagramSocket registrationSocket = null; @Override public void init(DaemonContext context) throws Exception { System.err.println("Initializing privileged NFS client socket..."); NfsConfiguration conf = new NfsConfiguration(); int clientPort = conf.getInt(NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY, NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_DEFAULT); if (clientPort < 1 || clientPort > 1023) { throw new RuntimeException("Must start privileged NFS server with '" + NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY + "' configured to a " + "privileged port."); } registrationSocket = new DatagramSocket( new InetSocketAddress("localhost", clientPort)); registrationSocket.setReuseAddress(true); args = context.getArguments(); } @Override public void start() throws Exception { Nfs3.startService(args, registrationSocket); } @Override public void stop() throws Exception { // Nothing to do. } @Override public void destroy() { if (registrationSocket != null && !registrationSocket.isClosed()) { registrationSocket.close(); } } }
2,804
35.907895
80
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import java.util.ArrayList; import java.util.Iterator; import java.util.Map.Entry; import java.util.concurrent.ConcurrentMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Maps; /** * A cache saves OpenFileCtx objects for different users. Each cache entry is * used to maintain the writing context for a single file. */ class OpenFileCtxCache { private static final Log LOG = LogFactory.getLog(OpenFileCtxCache.class); // Insert and delete with openFileMap are synced private final ConcurrentMap<FileHandle, OpenFileCtx> openFileMap = Maps .newConcurrentMap(); private final int maxStreams; private final long streamTimeout; private final StreamMonitor streamMonitor; OpenFileCtxCache(NfsConfiguration config, long streamTimeout) { maxStreams = config.getInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_DEFAULT); LOG.info("Maximum open streams is " + maxStreams); this.streamTimeout = streamTimeout; streamMonitor = new StreamMonitor(); } /** * The entry to be evicted is chosen based on the following rules:<br> * 1. if the OpenFileCtx has any pending task, it will not be chosen.<br> * 2. if there is an inactive OpenFileCtx, the first one found is evicted. <br> * 3. For OpenFileCtx entries that don't belong to group 1 or 2, the idlest one * is selected. If it has been idle longer than OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT, it * will be evicted. Otherwise, the whole eviction request fails. */ @VisibleForTesting Entry<FileHandle, OpenFileCtx> getEntryToEvict() { Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet() .iterator(); if (LOG.isTraceEnabled()) { LOG.trace("openFileMap size:" + openFileMap.size()); } Entry<FileHandle, OpenFileCtx> idlest = null; while (it.hasNext()) { Entry<FileHandle, OpenFileCtx> pairs = it.next(); OpenFileCtx ctx = pairs.getValue(); if (!ctx.getActiveState()) { if (LOG.isDebugEnabled()) { LOG.debug("Got one inactive stream: " + ctx); } return pairs; } if (ctx.hasPendingWork()) { // Always skip files with pending work. continue; } if (idlest == null) { idlest = pairs; } else { if (ctx.getLastAccessTime() < idlest.getValue().getLastAccessTime()) { idlest = pairs; } } } if (idlest == null) { LOG.warn("No eviction candidate. 
All streams have pending work."); return null; } else { long idleTime = Time.monotonicNow() - idlest.getValue().getLastAccessTime(); if (idleTime < NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT) { if (LOG.isDebugEnabled()) { LOG.debug("idlest stream's idle time:" + idleTime); } LOG.warn("All opened streams are busy, can't remove any from cache."); return null; } else { return idlest; } } } boolean put(FileHandle h, OpenFileCtx context) { OpenFileCtx toEvict = null; synchronized (this) { Preconditions.checkState(openFileMap.size() <= this.maxStreams, "stream cache size " + openFileMap.size() + " is larger than maximum" + this.maxStreams); if (openFileMap.size() == this.maxStreams) { Entry<FileHandle, OpenFileCtx> pairs = getEntryToEvict(); if (pairs == null) { return false; } else { if (LOG.isDebugEnabled()) { LOG.debug("Evict stream ctx: " + pairs.getValue()); } toEvict = openFileMap.remove(pairs.getKey()); Preconditions.checkState(toEvict == pairs.getValue(), "The deleted entry is not the same as oldest found."); } } openFileMap.put(h, context); } // Cleanup the old stream outside the lock if (toEvict != null) { toEvict.cleanup(); } return true; } @VisibleForTesting void scan(long streamTimeout) { ArrayList<OpenFileCtx> ctxToRemove = new ArrayList<OpenFileCtx>(); Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet() .iterator(); if (LOG.isTraceEnabled()) { LOG.trace("openFileMap size:" + openFileMap.size()); } while (it.hasNext()) { Entry<FileHandle, OpenFileCtx> pairs = it.next(); FileHandle handle = pairs.getKey(); OpenFileCtx ctx = pairs.getValue(); if (!ctx.streamCleanup(handle.getFileId(), streamTimeout)) { continue; } // Check it again inside lock before removing synchronized (this) { OpenFileCtx ctx2 = openFileMap.get(handle); if (ctx2 != null) { if (ctx2.streamCleanup(handle.getFileId(), streamTimeout)) { openFileMap.remove(handle); if (LOG.isDebugEnabled()) { LOG.debug("After remove stream " + handle.getFileId() + ", the stream number:" + openFileMap.size()); } ctxToRemove.add(ctx2); } } } } // Invoke the cleanup outside the lock for (OpenFileCtx ofc : ctxToRemove) { ofc.cleanup(); } } OpenFileCtx get(FileHandle key) { return openFileMap.get(key); } int size() { return openFileMap.size(); } void start() { streamMonitor.start(); } // Evict all entries void cleanAll() { ArrayList<OpenFileCtx> cleanedContext = new ArrayList<OpenFileCtx>(); synchronized (this) { Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet() .iterator(); if (LOG.isTraceEnabled()) { LOG.trace("openFileMap size:" + openFileMap.size()); } while (it.hasNext()) { Entry<FileHandle, OpenFileCtx> pairs = it.next(); OpenFileCtx ctx = pairs.getValue(); it.remove(); cleanedContext.add(ctx); } } // Invoke the cleanup outside the lock for (OpenFileCtx ofc : cleanedContext) { ofc.cleanup(); } } void shutdown() { // stop the dump thread if (streamMonitor.isAlive()) { streamMonitor.shouldRun(false); streamMonitor.interrupt(); try { streamMonitor.join(3000); } catch (InterruptedException ignored) { } } cleanAll(); } /** * StreamMonitor wakes up periodically to find and close idle streams. 
*/ class StreamMonitor extends Daemon { private final static int rotation = 5 * 1000; // 5 seconds private long lastWakeupTime = 0; private boolean shouldRun = true; void shouldRun(boolean shouldRun) { this.shouldRun = shouldRun; } @Override public void run() { while (shouldRun) { scan(streamTimeout); // Check if it can sleep try { long workedTime = Time.monotonicNow() - lastWakeupTime; if (workedTime < rotation) { if (LOG.isTraceEnabled()) { LOG.trace("StreamMonitor can still have a sleep:" + ((rotation - workedTime) / 1000)); } Thread.sleep(rotation - workedTime); } lastWakeupTime = Time.monotonicNow(); } catch (InterruptedException e) { LOG.info("StreamMonitor got interrupted"); return; } } } } }
8,577
30.536765
79
java
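The getEntryToEvict() javadoc in OpenFileCtxCache above describes a three-rule policy: never evict entries with pending work, evict any inactive entry immediately, and otherwise evict the idlest entry only if it has been idle longer than the minimum timeout. Below is a standalone sketch of that decision over a simplified stream type; the names are hypothetical stand-ins, not the OpenFileCtx API.
import java.util.List; public class EvictionPolicySketch { // Hypothetical stand-in for the OpenFileCtx state the policy inspects. static class Stream { boolean active; boolean pendingWork; long lastAccessMillis; Stream(boolean a, boolean p, long t) { active = a; pendingWork = p; lastAccessMillis = t; } } /** Returns the stream to evict, or null if every candidate is busy or too recently used. */ static Stream pickVictim(List<Stream> streams, long nowMillis, long minIdleMillis) { Stream idlest = null; for (Stream s : streams) { if (!s.active) { return s; // rule 2: inactive entries are evicted first } if (s.pendingWork) { continue; // rule 1: never evict entries with pending work } if (idlest == null || s.lastAccessMillis < idlest.lastAccessMillis) { idlest = s; // rule 3: track the idlest remaining entry } } if (idlest != null && nowMillis - idlest.lastAccessMillis >= minIdleMillis) { return idlest; } return null; // all streams are busy or were used too recently } }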
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import java.io.IOException; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.nfs.NfsFileType; import org.apache.hadoop.nfs.NfsTime; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.nfs.nfs3.response.WccAttr; import org.apache.hadoop.nfs.nfs3.response.WccData; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.security.IdMappingServiceProvider; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.channel.Channel; /** * Utility/helper methods related to NFS */ public class Nfs3Utils { public final static String INODEID_PATH_PREFIX = "/.reserved/.inodes/"; public final static String READ_RPC_START = "READ_RPC_CALL_START____"; public final static String READ_RPC_END = "READ_RPC_CALL_END______"; public final static String WRITE_RPC_START = "WRITE_RPC_CALL_START____"; public final static String WRITE_RPC_END = "WRITE_RPC_CALL_END______"; public static String getFileIdPath(FileHandle handle) { return getFileIdPath(handle.getFileId()); } public static String getFileIdPath(long fileId) { return INODEID_PATH_PREFIX + fileId; } public static HdfsFileStatus getFileStatus(DFSClient client, String fileIdPath) throws IOException { return client.getFileLinkInfo(fileIdPath); } public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus( HdfsFileStatus fs, IdMappingServiceProvider iug) { /** * Some 32-bit Linux clients have problems with the 64-bit fileId: it seems the 32-bit * client takes only the lower 32 bits of the fileId and treats it as a signed * int. When the 32nd bit is 1, the client considers it invalid. */ NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG; fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType; int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1; long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs .getChildrenNum()) : fs.getLen(); return new Nfs3FileAttributes(fileType, nlink, fs.getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()), iug.getGidAllowingUnknown(fs.getGroup()), size, 0 /* fsid */, fs.getFileId(), fs.getModificationTime(), fs.getAccessTime(), new Nfs3FileAttributes.Specdata3()); } public static Nfs3FileAttributes getFileAttr(DFSClient client, String fileIdPath, IdMappingServiceProvider iug) throws IOException { HdfsFileStatus fs = getFileStatus(client, fileIdPath); return fs == null ? null : getNfs3FileAttrFromFileStatus(fs, iug); } /** * HDFS directory size is always zero. Try to return something meaningful * here. Assume each child takes 32 bytes. 
*/ public static long getDirSize(int childNum) { return (childNum + 2) * 32; } public static WccAttr getWccAttr(DFSClient client, String fileIdPath) throws IOException { HdfsFileStatus fstat = getFileStatus(client, fileIdPath); if (fstat == null) { return null; } long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum()) : fstat .getLen(); return new WccAttr(size, new NfsTime(fstat.getModificationTime()), new NfsTime(fstat.getModificationTime())); } public static WccAttr getWccAttr(Nfs3FileAttributes attr) { return attr == null ? new WccAttr() : new WccAttr(attr.getSize(), attr.getMtime(), attr.getCtime()); } // TODO: maybe not efficient public static WccData createWccData(final WccAttr preOpAttr, DFSClient dfsClient, final String fileIdPath, final IdMappingServiceProvider iug) throws IOException { Nfs3FileAttributes postOpDirAttr = getFileAttr(dfsClient, fileIdPath, iug); return new WccData(preOpAttr, postOpDirAttr); } /** * Send a write response to the netty network socket channel */ public static void writeChannel(Channel channel, XDR out, int xid) { if (channel == null) { RpcProgramNfs3.LOG .info("Null channel should only happen in tests. Do nothing."); return; } if (RpcProgramNfs3.LOG.isDebugEnabled()) { RpcProgramNfs3.LOG.debug(WRITE_RPC_END + xid); } ChannelBuffer outBuf = XDR.writeMessageTcp(out, true); channel.write(outBuf); } public static void writeChannelCommit(Channel channel, XDR out, int xid) { if (RpcProgramNfs3.LOG.isDebugEnabled()) { RpcProgramNfs3.LOG.debug("Commit done:" + xid); } ChannelBuffer outBuf = XDR.writeMessageTcp(out, true); channel.write(outBuf); } private static boolean isSet(int access, int bits) { return (access & bits) == bits; } public static int getAccessRights(int mode, int type) { int rtn = 0; if (isSet(mode, Nfs3Constant.ACCESS_MODE_READ)) { rtn |= Nfs3Constant.ACCESS3_READ; // LOOKUP is only meaningful for dir if (type == NfsFileType.NFSDIR.toValue()) { rtn |= Nfs3Constant.ACCESS3_LOOKUP; } } if (isSet(mode, Nfs3Constant.ACCESS_MODE_WRITE)) { rtn |= Nfs3Constant.ACCESS3_MODIFY; rtn |= Nfs3Constant.ACCESS3_EXTEND; // Set delete bit, UNIX may ignore it for regular file since it's up to // parent dir op permission rtn |= Nfs3Constant.ACCESS3_DELETE; } if (isSet(mode, Nfs3Constant.ACCESS_MODE_EXECUTE)) { if (type == NfsFileType.NFSREG.toValue()) { rtn |= Nfs3Constant.ACCESS3_EXECUTE; } else { rtn |= Nfs3Constant.ACCESS3_LOOKUP; } } return rtn; } public static int getAccessRightsForUserGroup(int uid, int gid, int[] auxGids, Nfs3FileAttributes attr) { int mode = attr.getMode(); if (uid == attr.getUid()) { return getAccessRights(mode >> 6, attr.getType()); } if (gid == attr.getGid()) { return getAccessRights(mode >> 3, attr.getType()); } // Check for membership in auxiliary groups if (auxGids != null) { for (int auxGid : auxGids) { if (attr.getGid() == auxGid) { return getAccessRights(mode >> 3, attr.getType()); } } } return getAccessRights(mode, attr.getType()); } public static long bytesToLong(byte[] data) { long n = 0xffL & data[0]; for (int i = 1; i < 8; i++) { n = (n << 8) | (0xffL & data[i]); } return n; } public static byte[] longToByte(long v) { byte[] data = new byte[8]; data[0] = (byte) (v >>> 56); data[1] = (byte) (v >>> 48); data[2] = (byte) (v >>> 40); data[3] = (byte) (v >>> 32); data[4] = (byte) (v >>> 24); data[5] = (byte) (v >>> 16); data[6] = (byte) (v >>> 8); data[7] = (byte) (v >>> 0); return data; } public static long getElapsedTime(long startTimeNano) { return System.nanoTime() - startTimeNano; } }
7,791
34.257919
81
java
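Nfs3Utils above selects the owner, group, or other permission bits by shifting the POSIX mode (mode >> 6 for the owner in getAccessRightsForUserGroup), and bytesToLong/longToByte round-trip a big-endian 8-byte value such as a write verifier. A small illustrative sketch using only the public Nfs3Utils methods shown above; the chosen mode and verifier values are arbitrary examples.
import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils; import org.apache.hadoop.nfs.NfsFileType; public class Nfs3UtilsSketch { public static void main(String[] args) { // For mode 0755 the owner bits are (0755 >> 6) & 7 = 7 (rwx), which is the // slice getAccessRightsForUserGroup passes to getAccessRights when the uid matches. int ownerBits = (0755 >> 6) & 7; int rights = Nfs3Utils.getAccessRights(ownerBits, NfsFileType.NFSREG.toValue()); System.out.println("owner ACCESS3 mask: " + rights); // longToByte/bytesToLong are a big-endian round trip. long verifier = 0x123456789abcdef0L; System.out.println(Nfs3Utils.bytesToLong(Nfs3Utils.longToByte(verifier)) == verifier); } }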
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** * This class is a thread pool to easily schedule async data operations. Current * async data operation is write back operation. In the future, we could use it * for readahead operations too. */ public class AsyncDataService { static final Log LOG = LogFactory.getLog(AsyncDataService.class); // ThreadPool core pool size private static final int CORE_THREADS_PER_VOLUME = 1; // ThreadPool maximum pool size private static final int MAXIMUM_THREADS_PER_VOLUME = 4; // ThreadPool keep-alive time for threads over core pool size private static final long THREADS_KEEP_ALIVE_SECONDS = 60; private final ThreadGroup threadGroup = new ThreadGroup("async data service"); private ThreadFactory threadFactory = null; private ThreadPoolExecutor executor = null; public AsyncDataService() { threadFactory = new ThreadFactory() { @Override public Thread newThread(Runnable r) { return new Thread(threadGroup, r); } }; executor = new ThreadPoolExecutor(CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME, THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), threadFactory); // This can reduce the number of running threads executor.allowCoreThreadTimeOut(true); } /** * Execute the task sometime in the future. */ synchronized void execute(Runnable task) { if (executor == null) { throw new RuntimeException("AsyncDataService is already shutdown"); } if (LOG.isDebugEnabled()) { LOG.debug("Current active thread number: " + executor.getActiveCount() + " queue size: " + executor.getQueue().size() + " scheduled task number: " + executor.getTaskCount()); } executor.execute(task); } /** * Gracefully shut down the ThreadPool. Will wait for all data tasks to * finish. */ synchronized void shutdown() { if (executor == null) { LOG.warn("AsyncDataService has already shut down."); } else { LOG.info("Shutting down all async data service threads..."); executor.shutdown(); // clear the executor so that calling execute again will fail. executor = null; LOG.info("All async data service threads have been shut down"); } } /** * Write the data to HDFS asynchronously */ void writeAsync(OpenFileCtx openFileCtx) { if (LOG.isDebugEnabled()) { LOG.debug("Scheduling write back task for fileId: " + openFileCtx.getLatestAttr().getFileId()); } WriteBackTask wbTask = new WriteBackTask(openFileCtx); execute(wbTask); } /** * A task to write data back to HDFS for a file. 
Since only one thread can * write to a file, there should only be one task at any time for a file * (in queue or executing), and this should be guaranteed by the caller. */ static class WriteBackTask implements Runnable { OpenFileCtx openFileCtx; WriteBackTask(OpenFileCtx openFileCtx) { this.openFileCtx = openFileCtx; } OpenFileCtx getOpenFileCtx() { return openFileCtx; } @Override public String toString() { // Called in AsyncDataService.execute for displaying error messages. return "write back data for fileId" + openFileCtx.getLatestAttr().getFileId() + " with nextOffset " + openFileCtx.getNextOffset(); } @Override public void run() { try { openFileCtx.executeWriteBack(); } catch (Throwable t) { LOG.error("Async data service got error: ", t); } } } }
4,672
32.141844
80
java
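AsyncDataService above wraps a small ThreadPoolExecutor (1 core thread, at most 4, 60-second keep-alive, core timeout allowed) and schedules write-back tasks on it. The following is a generic sketch of that executor pattern with a stand-in task rather than a real OpenFileCtx write-back; it mirrors the constants in the class but is not the gateway's own code.
import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; public class WriteBackPoolSketch { public static void main(String[] args) throws InterruptedException { ThreadPoolExecutor executor = new ThreadPoolExecutor(1, 4, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>()); // Let the core thread die when idle, as AsyncDataService does. executor.allowCoreThreadTimeOut(true); // Stand-in for a WriteBackTask; the real task calls OpenFileCtx.executeWriteBack(). executor.execute(() -> System.out.println("write-back task ran on " + Thread.currentThread().getName())); executor.shutdown(); executor.awaitTermination(5, TimeUnit.SECONDS); } }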
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableQuantiles; import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.metrics2.source.JvmMetrics; /** * This class is for maintaining the various NFS gateway activity statistics and * publishing them through the metrics interfaces. */ @InterfaceAudience.Private @Metrics(about = "Nfs3 metrics", context = "dfs") public class Nfs3Metrics { // All mutable rates are in nanoseconds // No metric for nullProcedure; @Metric MutableRate getattr; @Metric MutableRate setattr; @Metric MutableRate lookup; @Metric MutableRate access; @Metric MutableRate readlink; @Metric MutableRate read; final MutableQuantiles[] readNanosQuantiles; @Metric MutableRate write; final MutableQuantiles[] writeNanosQuantiles; @Metric MutableRate create; @Metric MutableRate mkdir; @Metric MutableRate symlink; @Metric MutableRate mknod; @Metric MutableRate remove; @Metric MutableRate rmdir; @Metric MutableRate rename; @Metric MutableRate link; @Metric MutableRate readdir; @Metric MutableRate readdirplus; @Metric MutableRate fsstat; @Metric MutableRate fsinfo; @Metric MutableRate pathconf; @Metric MutableRate commit; final MutableQuantiles[] commitNanosQuantiles; @Metric MutableCounterLong bytesWritten; @Metric MutableCounterLong bytesRead; final MetricsRegistry registry = new MetricsRegistry("nfs3"); final String name; JvmMetrics jvmMetrics = null; public Nfs3Metrics(String name, String sessionId, int[] intervals, final JvmMetrics jvmMetrics) { this.name = name; this.jvmMetrics = jvmMetrics; registry.tag(SessionId, sessionId); final int len = intervals.length; readNanosQuantiles = new MutableQuantiles[len]; writeNanosQuantiles = new MutableQuantiles[len]; commitNanosQuantiles = new MutableQuantiles[len]; for (int i = 0; i < len; i++) { int interval = intervals[i]; readNanosQuantiles[i] = registry.newQuantiles("readProcessNanos" + interval + "s", "Read process in ns", "ops", "latency", interval); writeNanosQuantiles[i] = registry.newQuantiles("writeProcessNanos" + interval + "s", "Write process in ns", "ops", "latency", interval); commitNanosQuantiles[i] = 
registry.newQuantiles("commitProcessNanos" + interval + "s", "Commit process in ns", "ops", "latency", interval); } } public static Nfs3Metrics create(Configuration conf, String gatewayName) { String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY); MetricsSystem ms = DefaultMetricsSystem.instance(); JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms); // Percentile measurement is [50th,75th,90th,95th,99th] currently int[] intervals = conf .getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY); return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm)); } public String name() { return name; } public JvmMetrics getJvmMetrics() { return jvmMetrics; } public void incrBytesWritten(long bytes) { bytesWritten.incr(bytes); } public void incrBytesRead(long bytes) { bytesRead.incr(bytes); } public void addGetattr(long latencyNanos) { getattr.add(latencyNanos); } public void addSetattr(long latencyNanos) { setattr.add(latencyNanos); } public void addLookup(long latencyNanos) { lookup.add(latencyNanos); } public void addAccess(long latencyNanos) { access.add(latencyNanos); } public void addReadlink(long latencyNanos) { readlink.add(latencyNanos); } public void addRead(long latencyNanos) { read.add(latencyNanos); for (MutableQuantiles q : readNanosQuantiles) { q.add(latencyNanos); } } public void addWrite(long latencyNanos) { write.add(latencyNanos); for (MutableQuantiles q : writeNanosQuantiles) { q.add(latencyNanos); } } public void addCreate(long latencyNanos) { create.add(latencyNanos); } public void addMkdir(long latencyNanos) { mkdir.add(latencyNanos); } public void addSymlink(long latencyNanos) { symlink.add(latencyNanos); } public void addMknod(long latencyNanos) { mknod.add(latencyNanos); } public void addRemove(long latencyNanos) { remove.add(latencyNanos); } public void addRmdir(long latencyNanos) { rmdir.add(latencyNanos); } public void addRename(long latencyNanos) { rename.add(latencyNanos); } public void addLink(long latencyNanos) { link.add(latencyNanos); } public void addReaddir(long latencyNanos) { readdir.add(latencyNanos); } public void addReaddirplus(long latencyNanos) { readdirplus.add(latencyNanos); } public void addFsstat(long latencyNanos) { fsstat.add(latencyNanos); } public void addFsinfo(long latencyNanos) { fsinfo.add(latencyNanos); } public void addPathconf(long latencyNanos) { pathconf.add(latencyNanos); } public void addCommit(long latencyNanos) { commit.add(latencyNanos); for (MutableQuantiles q : commitNanosQuantiles) { q.add(latencyNanos); } } }
6,600
29.004545
80
java
hadoop
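The Nfs3Metrics class above ties every latency sample to a MutableRate and, for read/write/commit, to a set of MutableQuantiles built from the configured percentile intervals. A minimal usage sketch in the same spirit as createRpcProgramNfs3 (the gateway name "nfs3-gw", the single 60-second interval, and the sample latency are illustrative assumptions, not values from the source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

// Sketch only: wire up Nfs3Metrics outside the gateway and feed it one read sample.
public class Nfs3MetricsSketch {
  public static void main(String[] args) {
    DefaultMetricsSystem.initialize("Nfs3");        // same metrics-system name the gateway uses
    Configuration conf = new Configuration();
    // Hypothetical setting: one 60s rolling window for the latency quantiles.
    conf.set(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY, "60");
    Nfs3Metrics metrics = Nfs3Metrics.create(conf, "nfs3-gw");
    long latencyNanos = 1200000L;                   // a made-up 1.2 ms read latency
    metrics.addRead(latencyNanos);                  // updates the read rate and the 60s quantiles
    metrics.incrBytesRead(4096);                    // byte counter is tracked separately
  }
}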
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import org.apache.commons.logging.LogFactory; import java.io.IOException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; import java.util.Map.Entry; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import com.google.common.base.Preconditions; import org.apache.commons.logging.Log; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSInputStream; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ShutdownHookManager; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Objects; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; /** * A cache saves DFSClient objects for different users */ class DFSClientCache { private static final Log LOG = LogFactory.getLog(DFSClientCache.class); /** * Cache that maps User id to the corresponding DFSClient. */ @VisibleForTesting final LoadingCache<String, DFSClient> clientCache; final static int DEFAULT_DFS_CLIENT_CACHE_SIZE = 256; /** * Cache that maps <DFSClient, inode path> to the corresponding * FSDataInputStream. 
*/ final LoadingCache<DFSInputStreamCaheKey, FSDataInputStream> inputstreamCache; /** * Time to live for a DFSClient (in seconds) */ final static int DEFAULT_DFS_INPUTSTREAM_CACHE_SIZE = 1024; final static int DEFAULT_DFS_INPUTSTREAM_CACHE_TTL = 10 * 60; private final NfsConfiguration config; private static class DFSInputStreamCaheKey { final String userId; final String inodePath; private DFSInputStreamCaheKey(String userId, String inodePath) { super(); this.userId = userId; this.inodePath = inodePath; } @Override public boolean equals(Object obj) { if (obj instanceof DFSInputStreamCaheKey) { DFSInputStreamCaheKey k = (DFSInputStreamCaheKey) obj; return userId.equals(k.userId) && inodePath.equals(k.inodePath); } return false; } @Override public int hashCode() { return Objects.hashCode(userId, inodePath); } } DFSClientCache(NfsConfiguration config) { this(config, DEFAULT_DFS_CLIENT_CACHE_SIZE); } DFSClientCache(NfsConfiguration config, int clientCache) { this.config = config; this.clientCache = CacheBuilder.newBuilder() .maximumSize(clientCache) .removalListener(clientRemovalListener()) .build(clientLoader()); this.inputstreamCache = CacheBuilder.newBuilder() .maximumSize(DEFAULT_DFS_INPUTSTREAM_CACHE_SIZE) .expireAfterAccess(DEFAULT_DFS_INPUTSTREAM_CACHE_TTL, TimeUnit.SECONDS) .removalListener(inputStreamRemovalListener()) .build(inputStreamLoader()); ShutdownHookManager.get().addShutdownHook(new CacheFinalizer(), SHUTDOWN_HOOK_PRIORITY); } /** * Priority of the FileSystem shutdown hook. */ public static final int SHUTDOWN_HOOK_PRIORITY = 10; private class CacheFinalizer implements Runnable { @Override public synchronized void run() { try { closeAll(true); } catch (IOException e) { LOG.info("DFSClientCache.closeAll() threw an exception:\n", e); } } } /** * Close all DFSClient instances in the Cache. * @param onlyAutomatic only close those that are marked for automatic closing */ synchronized void closeAll(boolean onlyAutomatic) throws IOException { List<IOException> exceptions = new ArrayList<IOException>(); ConcurrentMap<String, DFSClient> map = clientCache.asMap(); for (Entry<String, DFSClient> item : map.entrySet()) { final DFSClient client = item.getValue(); if (client != null) { try { client.close(); } catch (IOException ioe) { exceptions.add(ioe); } } } if (!exceptions.isEmpty()) { throw MultipleIOException.createIOException(exceptions); } } private CacheLoader<String, DFSClient> clientLoader() { return new CacheLoader<String, DFSClient>() { @Override public DFSClient load(String userName) throws Exception { UserGroupInformation ugi = getUserGroupInformation( userName, UserGroupInformation.getCurrentUser()); // Guava requires CacheLoader never returns null. 
return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() { @Override public DFSClient run() throws IOException { return new DFSClient(NameNode.getAddress(config), config); } }); } }; } /** * This method uses the currentUser, and real user to create a proxy * @param effectiveUser The user who is being proxied by the real user * @param realUser The actual user who does the command * @return Proxy UserGroupInformation * @throws IOException If proxying fails */ UserGroupInformation getUserGroupInformation( String effectiveUser, UserGroupInformation realUser) throws IOException { Preconditions.checkNotNull(effectiveUser); Preconditions.checkNotNull(realUser); realUser.checkTGTAndReloginFromKeytab(); UserGroupInformation ugi = UserGroupInformation.createProxyUser(effectiveUser, realUser); if (LOG.isDebugEnabled()){ LOG.debug(String.format("Created ugi:" + " %s for username: %s", ugi, effectiveUser)); } return ugi; } private RemovalListener<String, DFSClient> clientRemovalListener() { return new RemovalListener<String, DFSClient>() { @Override public void onRemoval(RemovalNotification<String, DFSClient> notification) { DFSClient client = notification.getValue(); try { client.close(); } catch (IOException e) { LOG.warn(String.format( "IOException when closing the DFSClient(%s), cause: %s", client, e)); } } }; } private RemovalListener<DFSInputStreamCaheKey, FSDataInputStream> inputStreamRemovalListener() { return new RemovalListener<DFSClientCache.DFSInputStreamCaheKey, FSDataInputStream>() { @Override public void onRemoval( RemovalNotification<DFSInputStreamCaheKey, FSDataInputStream> notification) { try { notification.getValue().close(); } catch (IOException ignored) { } } }; } private CacheLoader<DFSInputStreamCaheKey, FSDataInputStream> inputStreamLoader() { return new CacheLoader<DFSInputStreamCaheKey, FSDataInputStream>() { @Override public FSDataInputStream load(DFSInputStreamCaheKey key) throws Exception { DFSClient client = getDfsClient(key.userId); DFSInputStream dis = client.open(key.inodePath); return client.createWrappedInputStream(dis); } }; } DFSClient getDfsClient(String userName) { DFSClient client = null; try { client = clientCache.get(userName); } catch (ExecutionException e) { LOG.error("Failed to create DFSClient for user:" + userName + " Cause:" + e); } return client; } FSDataInputStream getDfsInputStream(String userName, String inodePath) { DFSInputStreamCaheKey k = new DFSInputStreamCaheKey(userName, inodePath); FSDataInputStream s = null; try { s = inputstreamCache.get(k); } catch (ExecutionException e) { LOG.warn("Failed to create DFSInputStream for user:" + userName + " Cause:" + e); } return s; } public void invalidateDfsInputStream(String userName, String inodePath) { DFSInputStreamCaheKey k = new DFSInputStreamCaheKey(userName, inodePath); inputstreamCache.invalidate(k); } }
9,038
31.631769
98
java
hadoop
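DFSClientCache above keys a Guava LoadingCache by user name, so each NFS user gets one proxied DFSClient that is created lazily and closed on eviction or shutdown. A small sketch of that flow, assuming it lives in the same org.apache.hadoop.hdfs.nfs.nfs3 package (the class and its constructor are package-private) and that fs.defaultFS points at a reachable NameNode; the user name "alice" is illustrative:

package org.apache.hadoop.hdfs.nfs.nfs3;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;

// Sketch only: one DFSClient per user name, served from the cache on repeat lookups.
public class DFSClientCacheSketch {
  public static void main(String[] args) throws Exception {
    NfsConfiguration conf = new NfsConfiguration();    // assumes fs.defaultFS is an hdfs:// URI
    DFSClientCache cache = new DFSClientCache(conf);
    DFSClient first = cache.getDfsClient("alice");     // loaded via the CacheLoader, proxied as "alice"
    DFSClient again = cache.getDfsClient("alice");     // cache hit: same instance
    System.out.println("cached: " + (first == again));
    cache.closeAll(true);                              // close every cached client
  }
}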
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OffsetRange.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import java.util.Comparator; import com.google.common.base.Preconditions; /** * OffsetRange is the range of read/write request. A single point (e.g.,[5,5]) * is not a valid range. */ public class OffsetRange { public static final Comparator<OffsetRange> ReverseComparatorOnMin = new Comparator<OffsetRange>() { @Override public int compare(OffsetRange o1, OffsetRange o2) { if (o1.getMin() == o2.getMin()) { return o1.getMax() < o2.getMax() ? 1 : (o1.getMax() > o2.getMax() ? -1 : 0); } else { return o1.getMin() < o2.getMin() ? 1 : -1; } } }; private final long min; private final long max; OffsetRange(long min, long max) { Preconditions.checkArgument(min >= 0 && max >= 0 && min < max); this.min = min; this.max = max; } long getMin() { return min; } long getMax() { return max; } @Override public int hashCode() { return (int) (min ^ max); } @Override public boolean equals(Object o) { if (o instanceof OffsetRange) { OffsetRange range = (OffsetRange) o; return (min == range.getMin()) && (max == range.getMax()); } return false; } }
2,049
26.702703
78
java
hadoop
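ReverseComparatorOnMin in OffsetRange above sorts ranges by descending start offset (ties broken by descending end offset), so pending writes can be scanned from the highest offset down. A tiny sketch of that ordering, assuming it sits in the same package because the constructor and getters are package-private; the offsets are arbitrary example values:

package org.apache.hadoop.hdfs.nfs.nfs3;

import java.util.TreeMap;

// Sketch only: the range with the larger min offset sorts first under ReverseComparatorOnMin.
public class OffsetRangeSketch {
  public static void main(String[] args) {
    TreeMap<OffsetRange, String> pending =
        new TreeMap<OffsetRange, String>(OffsetRange.ReverseComparatorOnMin);
    pending.put(new OffsetRange(0, 4096), "first chunk");
    pending.put(new OffsetRange(4096, 8192), "second chunk");
    System.out.println(pending.firstKey().getMin());   // prints 4096, the largest start offset
  }
}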
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import java.io.FileOutputStream; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.ByteBuffer; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; import org.jboss.netty.channel.Channel; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; /** * WriteCtx saves the context of one write request, such as request, channel, * xid and reply status. */ class WriteCtx { public static final Log LOG = LogFactory.getLog(WriteCtx.class); /** * In memory write data has 3 states. ALLOW_DUMP: not sequential write, still * wait for prerequisite writes. NO_DUMP: sequential write, no need to dump * since it will be written to HDFS soon. DUMPED: already dumped to a file. */ public static enum DataState { ALLOW_DUMP, NO_DUMP, DUMPED } private final FileHandle handle; private final long offset; private final int count; /** * Some clients can send a write that includes previously written data along * with new data. In such case the write request is changed to write from only * the new data. {@code originalCount} tracks the number of bytes sent in the * request before it was modified to write only the new data. * @see OpenFileCtx#addWritesToCache for more details */ private final int originalCount; public static final int INVALID_ORIGINAL_COUNT = -1; public int getOriginalCount() { return originalCount; } private final WriteStableHow stableHow; private volatile ByteBuffer data; private final Channel channel; private final int xid; private boolean replied; /** * Data belonging to the same {@link OpenFileCtx} may be dumped to a file. * After being dumped to the file, the corresponding {@link WriteCtx} records * the dump file and the offset. */ private RandomAccessFile raf; private long dumpFileOffset; private volatile DataState dataState; public final long startTime; public DataState getDataState() { return dataState; } public void setDataState(DataState dataState) { this.dataState = dataState; } /** * Writing the data into a local file. After the writing, if * {@link #dataState} is still ALLOW_DUMP, set {@link #data} to null and set * {@link #dataState} to DUMPED. 
*/ long dumpData(FileOutputStream dumpOut, RandomAccessFile raf) throws IOException { if (dataState != DataState.ALLOW_DUMP) { if (LOG.isTraceEnabled()) { LOG.trace("No need to dump with status(replied,dataState):" + "(" + replied + "," + dataState + ")"); } return 0; } // Resized write should not allow dump Preconditions.checkState(originalCount == INVALID_ORIGINAL_COUNT); this.raf = raf; dumpFileOffset = dumpOut.getChannel().position(); dumpOut.write(data.array(), 0, count); if (LOG.isDebugEnabled()) { LOG.debug("After dump, new dumpFileOffset:" + dumpFileOffset); } // it is possible that while we dump the data, the data is also being // written back to HDFS. After dump, if the writing back has not finished // yet, we change its flag to DUMPED and set the data to null. Otherwise // this WriteCtx instance should have been removed from the buffer. if (dataState == DataState.ALLOW_DUMP) { synchronized (this) { if (dataState == DataState.ALLOW_DUMP) { data = null; dataState = DataState.DUMPED; return count; } } } return 0; } FileHandle getHandle() { return handle; } long getOffset() { return offset; } int getCount() { return count; } WriteStableHow getStableHow() { return stableHow; } @VisibleForTesting ByteBuffer getData() throws IOException { if (dataState != DataState.DUMPED) { synchronized (this) { if (dataState != DataState.DUMPED) { Preconditions.checkState(data != null); return data; } } } // read back from dumped file this.loadData(); return data; } private void loadData() throws IOException { Preconditions.checkState(data == null); byte[] rawData = new byte[count]; raf.seek(dumpFileOffset); int size = raf.read(rawData, 0, count); if (size != count) { throw new IOException("Data count is " + count + ", but read back " + size + " bytes"); } data = ByteBuffer.wrap(rawData); } public void writeData(HdfsDataOutputStream fos) throws IOException { Preconditions.checkState(fos != null); ByteBuffer dataBuffer; try { dataBuffer = getData(); } catch (Exception e1) { LOG.error("Failed to get request data offset:" + offset + " count:" + count + " error:" + e1); throw new IOException("Can't get WriteCtx.data"); } byte[] data = dataBuffer.array(); int position = dataBuffer.position(); int limit = dataBuffer.limit(); Preconditions.checkState(limit - position == count); // Modified write has a valid original count if (position != 0) { if (limit != getOriginalCount()) { throw new IOException("Modified write has different original size. " + "buff position:" + position + " buff limit:" + limit + ". " + toString()); } } // Now write data fos.write(data, position, count); } Channel getChannel() { return channel; } int getXid() { return xid; } boolean getReplied() { return replied; } void setReplied(boolean replied) { this.replied = replied; } WriteCtx(FileHandle handle, long offset, int count, int originalCount, WriteStableHow stableHow, ByteBuffer data, Channel channel, int xid, boolean replied, DataState dataState) { this.handle = handle; this.offset = offset; this.count = count; this.originalCount = originalCount; this.stableHow = stableHow; this.data = data; this.channel = channel; this.xid = xid; this.replied = replied; this.dataState = dataState; raf = null; this.startTime = System.nanoTime(); } @Override public String toString() { return "Id:" + handle.getFileId() + " offset:" + offset + " count:" + count + " originalCount:" + originalCount + " stableHow:" + stableHow + " replied:" + replied + " dataState:" + dataState + " xid:" + xid; } }
7,483
29.177419
80
java
hadoop
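dumpData() and getData() in WriteCtx above both read the volatile dataState once without locking and only enter the synchronized block, re-checking the state, when a transition still looks possible; that is what lets the dumper thread and the HDFS write-back thread race safely over the in-memory buffer. A stripped-down toy of that check-then-recheck pattern (not the real WriteCtx, no file I/O, all names invented):

// Toy sketch of the double-checked state transition used by WriteCtx.dumpData().
class DumpStateSketch {
  enum DataState { ALLOW_DUMP, NO_DUMP, DUMPED }

  private volatile DataState dataState = DataState.ALLOW_DUMP;
  private volatile byte[] data = new byte[] { 1, 2, 3 };

  long dump() {
    if (dataState != DataState.ALLOW_DUMP) {
      return 0;                                    // sequential write or already dumped
    }
    // ... the real code writes `data` to the dump file here ...
    if (dataState == DataState.ALLOW_DUMP) {
      synchronized (this) {
        if (dataState == DataState.ALLOW_DUMP) {   // write-back has not taken the data yet
          long dumped = data.length;
          data = null;                             // release the in-memory copy
          dataState = DataState.DUMPED;
          return dumped;
        }
      }
    }
    return 0;                                      // write-back won the race; keep data in memory
  }
}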
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3HttpServer.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import java.io.IOException; import java.net.InetSocketAddress; import java.net.URI; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.net.NetUtils; /** * Encapsulates the HTTP server started by the NFS3 gateway. */ class Nfs3HttpServer { private int infoPort; private int infoSecurePort; private HttpServer2 httpServer; private final NfsConfiguration conf; Nfs3HttpServer(NfsConfiguration conf) { this.conf = conf; } void start() throws IOException { final InetSocketAddress httpAddr = getHttpAddress(conf); final String httpsAddrString = conf.get( NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY, NfsConfigKeys.NFS_HTTPS_ADDRESS_DEFAULT); InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString); HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf, httpAddr, httpsAddr, "nfs3", NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY); this.httpServer = builder.build(); this.httpServer.start(); HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf); int connIdx = 0; if (policy.isHttpEnabled()) { infoPort = httpServer.getConnectorAddress(connIdx++).getPort(); } if (policy.isHttpsEnabled()) { infoSecurePort = httpServer.getConnectorAddress(connIdx).getPort(); } } void stop() throws IOException { if (httpServer != null) { try { httpServer.stop(); } catch (Exception e) { throw new IOException(e); } } } public int getPort() { return this.infoPort; } public int getSecurePort() { return this.infoSecurePort; } /** * Return the URI that locates the HTTP server. */ public URI getServerURI() { // getHttpClientScheme() only returns https for HTTPS_ONLY policy. This // matches the behavior that the first connector is a HTTPS connector only // for HTTPS_ONLY policy. InetSocketAddress addr = httpServer.getConnectorAddress(0); return URI.create(DFSUtil.getHttpClientScheme(conf) + "://" + NetUtils.getHostPortString(addr)); } public InetSocketAddress getHttpAddress(Configuration conf) { String addr = conf.get(NfsConfigKeys.NFS_HTTP_ADDRESS_KEY, NfsConfigKeys.NFS_HTTP_ADDRESS_DEFAULT); return NetUtils.createSocketAddr(addr, NfsConfigKeys.NFS_HTTP_PORT_DEFAULT, NfsConfigKeys.NFS_HTTP_ADDRESS_KEY); } }
3,591
31.36036
79
java
hadoop
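Nfs3HttpServer above is a thin lifecycle wrapper: start() builds the HttpServer2 from the configured HTTP/HTTPS addresses and records the bound ports, and stop() tears it down. A sketch of that lifecycle, assuming same-package access (the class is package-private) and the default nfs3 web resources on the classpath:

package org.apache.hadoop.hdfs.nfs.nfs3;

import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;

// Sketch only: start the gateway HTTP server, print where it listens, then stop it.
public class Nfs3HttpServerSketch {
  public static void main(String[] args) throws Exception {
    NfsConfiguration conf = new NfsConfiguration();
    Nfs3HttpServer server = new Nfs3HttpServer(conf);
    server.start();                                 // binds HTTP (and HTTPS if the policy enables it)
    System.out.println("Serving at " + server.getServerURI()
        + ", info port " + server.getPort());
    server.stop();
  }
}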
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.net.DatagramSocket; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.SocketAddress; import java.nio.ByteBuffer; import java.nio.charset.Charset; import java.util.EnumSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FsStatus; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.DNS; import org.apache.hadoop.nfs.AccessPrivilege; import org.apache.hadoop.nfs.NfsExports; import org.apache.hadoop.nfs.NfsFileType; import org.apache.hadoop.nfs.NfsTime; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.apache.hadoop.nfs.nfs3.Nfs3Constant.NFSPROC3; import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.nfs.nfs3.Nfs3Interface; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.request.ACCESS3Request; import org.apache.hadoop.nfs.nfs3.request.COMMIT3Request; import org.apache.hadoop.nfs.nfs3.request.CREATE3Request; import org.apache.hadoop.nfs.nfs3.request.FSINFO3Request; import org.apache.hadoop.nfs.nfs3.request.FSSTAT3Request; import org.apache.hadoop.nfs.nfs3.request.GETATTR3Request; import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request; import org.apache.hadoop.nfs.nfs3.request.MKDIR3Request; import org.apache.hadoop.nfs.nfs3.request.PATHCONF3Request; import org.apache.hadoop.nfs.nfs3.request.READ3Request; import org.apache.hadoop.nfs.nfs3.request.READDIR3Request; import org.apache.hadoop.nfs.nfs3.request.READDIRPLUS3Request; import org.apache.hadoop.nfs.nfs3.request.READLINK3Request; import org.apache.hadoop.nfs.nfs3.request.REMOVE3Request; import 
org.apache.hadoop.nfs.nfs3.request.RENAME3Request; import org.apache.hadoop.nfs.nfs3.request.RMDIR3Request; import org.apache.hadoop.nfs.nfs3.request.SETATTR3Request; import org.apache.hadoop.nfs.nfs3.request.SYMLINK3Request; import org.apache.hadoop.nfs.nfs3.request.SetAttr3; import org.apache.hadoop.nfs.nfs3.request.SetAttr3.SetAttrField; import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; import org.apache.hadoop.nfs.nfs3.response.ACCESS3Response; import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response; import org.apache.hadoop.nfs.nfs3.response.CREATE3Response; import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response; import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response; import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response; import org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response; import org.apache.hadoop.nfs.nfs3.response.MKDIR3Response; import org.apache.hadoop.nfs.nfs3.response.NFS3Response; import org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response; import org.apache.hadoop.nfs.nfs3.response.READ3Response; import org.apache.hadoop.nfs.nfs3.response.READDIR3Response; import org.apache.hadoop.nfs.nfs3.response.READDIR3Response.DirList3; import org.apache.hadoop.nfs.nfs3.response.READDIR3Response.Entry3; import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response; import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response.DirListPlus3; import org.apache.hadoop.nfs.nfs3.response.READLINK3Response; import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response; import org.apache.hadoop.nfs.nfs3.response.RENAME3Response; import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response; import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response; import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response; import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; import org.apache.hadoop.nfs.nfs3.response.WccAttr; import org.apache.hadoop.nfs.nfs3.response.WccData; import org.apache.hadoop.oncrpc.RpcAcceptedReply; import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcCallCache; import org.apache.hadoop.oncrpc.RpcDeniedReply; import org.apache.hadoop.oncrpc.RpcInfo; import org.apache.hadoop.oncrpc.RpcProgram; import org.apache.hadoop.oncrpc.RpcReply; import org.apache.hadoop.oncrpc.RpcResponse; import org.apache.hadoop.oncrpc.RpcUtil; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.Credentials; import org.apache.hadoop.oncrpc.security.CredentialsSys; import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; import org.apache.hadoop.oncrpc.security.SecurityHandler; import org.apache.hadoop.oncrpc.security.SysSecurityHandler; import org.apache.hadoop.oncrpc.security.Verifier; import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.IdMappingConstant; import org.apache.hadoop.security.IdMappingServiceProvider; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.ShellBasedIdMapping; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.util.JvmPauseMonitor; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.buffer.ChannelBuffers; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelHandlerContext; import com.google.common.annotations.VisibleForTesting; /** * RPC program corresponding to nfs daemon. See {@link Nfs3}. 
*/ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { public static final int DEFAULT_UMASK = 0022; public static final FsPermission umask = new FsPermission( (short) DEFAULT_UMASK); static final Log LOG = LogFactory.getLog(RpcProgramNfs3.class); private final NfsConfiguration config; private final WriteManager writeManager; private final IdMappingServiceProvider iug; private final DFSClientCache clientCache; private final NfsExports exports; private final short replication; private final long blockSize; private final int bufferSize; private final boolean aixCompatMode; private String writeDumpDir; // The dir save dump files private final RpcCallCache rpcCallCache; private JvmPauseMonitor pauseMonitor; private Nfs3HttpServer infoServer = null; static Nfs3Metrics metrics; private String superuser; public RpcProgramNfs3(NfsConfiguration config, DatagramSocket registrationSocket, boolean allowInsecurePorts) throws IOException { super("NFS3", "localhost", config.getInt( NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT), Nfs3Constant.PROGRAM, Nfs3Constant.VERSION, Nfs3Constant.VERSION, registrationSocket, allowInsecurePorts); this.config = config; config.set(FsPermission.UMASK_LABEL, "000"); iug = new ShellBasedIdMapping(config); aixCompatMode = config.getBoolean( NfsConfigKeys.AIX_COMPAT_MODE_KEY, NfsConfigKeys.AIX_COMPAT_MODE_DEFAULT); exports = NfsExports.getInstance(config); writeManager = new WriteManager(iug, config, aixCompatMode); clientCache = new DFSClientCache(config); replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, DFSConfigKeys.DFS_REPLICATION_DEFAULT); blockSize = config.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT); bufferSize = config.getInt( CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT); writeDumpDir = config.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY, NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT); boolean enableDump = config.getBoolean(NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY, NfsConfigKeys.DFS_NFS_FILE_DUMP_DEFAULT); UserGroupInformation.setConfiguration(config); SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY, NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY); superuser = config.get(NfsConfigKeys.NFS_SUPERUSER_KEY, NfsConfigKeys.NFS_SUPERUSER_DEFAULT); LOG.info("Configured HDFS superuser is " + superuser); if (!enableDump) { writeDumpDir = null; } else { clearDirectory(writeDumpDir); } rpcCallCache = new RpcCallCache("NFS3", 256); infoServer = new Nfs3HttpServer(config); } public static RpcProgramNfs3 createRpcProgramNfs3(NfsConfiguration config, DatagramSocket registrationSocket, boolean allowInsecurePorts) throws IOException { DefaultMetricsSystem.initialize("Nfs3"); String displayName = DNS.getDefaultHost("default", "default") + config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT); metrics = Nfs3Metrics.create(config, displayName); return new RpcProgramNfs3(config, registrationSocket, allowInsecurePorts); } private void clearDirectory(String writeDumpDir) throws IOException { File dumpDir = new File(writeDumpDir); if (dumpDir.exists()) { LOG.info("Delete current dump directory " + writeDumpDir); if (!(FileUtil.fullyDelete(dumpDir))) { throw new IOException("Cannot remove current dump directory: " + dumpDir); } } LOG.info("Create new dump directory " + writeDumpDir); if (!dumpDir.mkdirs()) { throw new IOException("Cannot create dump 
directory " + dumpDir); } } @Override public void startDaemons() { if (pauseMonitor == null) { pauseMonitor = new JvmPauseMonitor(config); pauseMonitor.start(); metrics.getJvmMetrics().setPauseMonitor(pauseMonitor); } writeManager.startAsyncDataService(); try { infoServer.start(); } catch (IOException e) { LOG.error("failed to start web server", e); } } @Override public void stopDaemons() { if (writeManager != null) { writeManager.shutdownAsyncDataService(); } if (pauseMonitor != null) { pauseMonitor.stop(); } // Stop the web server if (infoServer != null) { try { infoServer.stop(); } catch (Exception e) { LOG.warn("Exception shutting down web server", e); } } } @VisibleForTesting Nfs3HttpServer getInfoServer() { return this.infoServer; } // Checks the type of IOException and maps it to appropriate Nfs3Status code. private int mapErrorStatus(IOException e) { if (e instanceof FileNotFoundException) { return Nfs3Status.NFS3ERR_STALE; } else if (e instanceof AccessControlException) { return Nfs3Status.NFS3ERR_ACCES; } else { return Nfs3Status.NFS3ERR_IO; } } /****************************************************** * RPC call handlers ******************************************************/ @Override public NFS3Response nullProcedure() { if (LOG.isDebugEnabled()) { LOG.debug("NFS NULL"); } return new NFS3Response(Nfs3Status.NFS3_OK); } @Override public GETATTR3Response getattr(XDR xdr, RpcInfo info) { return getattr(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting GETATTR3Response getattr(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } GETATTR3Request request; try { request = GETATTR3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid GETATTR request"); response.setStatus(Nfs3Status.NFS3ERR_INVAL); return response; } FileHandle handle = request.getHandle(); if (LOG.isDebugEnabled()) { LOG.debug("GETATTR for fileId: " + handle.getFileId() + " client: " + remoteAddress); } Nfs3FileAttributes attrs = null; try { attrs = writeManager.getFileAttr(dfsClient, handle, iug); } catch (RemoteException r) { LOG.warn("Exception ", r); IOException io = r.unwrapRemoteException(); /** * AuthorizationException can be thrown if the user can't be proxy'ed. */ if (io instanceof AuthorizationException) { return new GETATTR3Response(Nfs3Status.NFS3ERR_ACCES); } else { return new GETATTR3Response(Nfs3Status.NFS3ERR_IO); } } catch (IOException e) { LOG.info("Can't get file attribute, fileId=" + handle.getFileId(), e); int status = mapErrorStatus(e); response.setStatus(status); return response; } if (attrs == null) { LOG.error("Can't get path for fileId: " + handle.getFileId()); response.setStatus(Nfs3Status.NFS3ERR_STALE); return response; } response.setPostOpAttr(attrs); return response; } // Set attribute, don't support setting "size". For file/dir creation, mode is // set during creation and setMode should be false here. 
private void setattrInternal(DFSClient dfsClient, String fileIdPath, SetAttr3 newAttr, boolean setMode) throws IOException { EnumSet<SetAttrField> updateFields = newAttr.getUpdateFields(); if (setMode && updateFields.contains(SetAttrField.MODE)) { if (LOG.isDebugEnabled()) { LOG.debug("set new mode: " + newAttr.getMode()); } dfsClient.setPermission(fileIdPath, new FsPermission((short) (newAttr.getMode()))); } if (updateFields.contains(SetAttrField.UID) || updateFields.contains(SetAttrField.GID)) { String uname = updateFields.contains(SetAttrField.UID) ? iug.getUserName( newAttr.getUid(), IdMappingConstant.UNKNOWN_USER) : null; String gname = updateFields.contains(SetAttrField.GID) ? iug .getGroupName(newAttr.getGid(), IdMappingConstant.UNKNOWN_GROUP) : null; dfsClient.setOwner(fileIdPath, uname, gname); } long atime = updateFields.contains(SetAttrField.ATIME) ? newAttr.getAtime() .getMilliSeconds() : -1; long mtime = updateFields.contains(SetAttrField.MTIME) ? newAttr.getMtime() .getMilliSeconds() : -1; if (atime != -1 || mtime != -1) { if (LOG.isDebugEnabled()) { LOG.debug("set atime: " + +atime + " mtime: " + mtime); } dfsClient.setTimes(fileIdPath, mtime, atime); } } @Override public SETATTR3Response setattr(XDR xdr, RpcInfo info) { return setattr(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting SETATTR3Response setattr(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { SETATTR3Response response = new SETATTR3Response(Nfs3Status.NFS3_OK); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } SETATTR3Request request; try { request = SETATTR3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid SETATTR request"); response.setStatus(Nfs3Status.NFS3ERR_INVAL); return response; } FileHandle handle = request.getHandle(); if (LOG.isDebugEnabled()) { LOG.debug("NFS SETATTR fileId: " + handle.getFileId() + " client: " + remoteAddress); } if (request.getAttr().getUpdateFields().contains(SetAttrField.SIZE)) { LOG.error("Setting file size is not supported when setattr, fileId: " + handle.getFileId()); response.setStatus(Nfs3Status.NFS3ERR_INVAL); return response; } String fileIdPath = Nfs3Utils.getFileIdPath(handle); Nfs3FileAttributes preOpAttr = null; try { preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); if (preOpAttr == null) { LOG.info("Can't get path for fileId: " + handle.getFileId()); response.setStatus(Nfs3Status.NFS3ERR_STALE); return response; } WccAttr preOpWcc = Nfs3Utils.getWccAttr(preOpAttr); if (request.isCheck()) { if (!preOpAttr.getCtime().equals(request.getCtime())) { WccData wccData = new WccData(preOpWcc, preOpAttr); return new SETATTR3Response(Nfs3Status.NFS3ERR_NOT_SYNC, wccData); } } // check the write access privilege if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { return new SETATTR3Response(Nfs3Status.NFS3ERR_ACCES, new WccData( preOpWcc, preOpAttr)); } setattrInternal(dfsClient, fileIdPath, request.getAttr(), true); Nfs3FileAttributes postOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); WccData wccData = new WccData(preOpWcc, postOpAttr); return new SETATTR3Response(Nfs3Status.NFS3_OK, wccData); } catch (IOException e) { LOG.warn("Exception ", e); WccData wccData = null; try { wccData = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpAttr), dfsClient, fileIdPath, iug); } catch (IOException e1) { LOG.info("Can't get postOpAttr for fileIdPath: 
" + fileIdPath, e1); } int status = mapErrorStatus(e); return new SETATTR3Response(status, wccData); } } @Override public LOOKUP3Response lookup(XDR xdr, RpcInfo info) { return lookup(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting LOOKUP3Response lookup(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { LOOKUP3Response response = new LOOKUP3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } LOOKUP3Request request; try { request = LOOKUP3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid LOOKUP request"); return new LOOKUP3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle dirHandle = request.getHandle(); String fileName = request.getName(); if (LOG.isDebugEnabled()) { LOG.debug("NFS LOOKUP dir fileId: " + dirHandle.getFileId() + " name: " + fileName + " client: " + remoteAddress); } try { String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle); Nfs3FileAttributes postOpObjAttr = writeManager.getFileAttr(dfsClient, dirHandle, fileName); if (postOpObjAttr == null) { if (LOG.isDebugEnabled()) { LOG.debug("NFS LOOKUP fileId: " + dirHandle.getFileId() + " name: " + fileName + " does not exist"); } Nfs3FileAttributes postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); return new LOOKUP3Response(Nfs3Status.NFS3ERR_NOENT, null, null, postOpDirAttr); } Nfs3FileAttributes postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); if (postOpDirAttr == null) { LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId()); return new LOOKUP3Response(Nfs3Status.NFS3ERR_STALE); } FileHandle fileHandle = new FileHandle(postOpObjAttr.getFileId()); return new LOOKUP3Response(Nfs3Status.NFS3_OK, fileHandle, postOpObjAttr, postOpDirAttr); } catch (IOException e) { LOG.warn("Exception ", e); int status = mapErrorStatus(e); return new LOOKUP3Response(status); } } @Override public ACCESS3Response access(XDR xdr, RpcInfo info) { return access(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting ACCESS3Response access(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } ACCESS3Request request; try { request = ACCESS3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid ACCESS request"); return new ACCESS3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle handle = request.getHandle(); Nfs3FileAttributes attrs; if (LOG.isDebugEnabled()) { LOG.debug("NFS ACCESS fileId: " + handle.getFileId() + " client: " + remoteAddress); } try { attrs = writeManager.getFileAttr(dfsClient, handle, iug); if (attrs == null) { LOG.error("Can't get path for fileId: " + handle.getFileId()); return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE); } if(iug.getUserName(securityHandler.getUid(), "unknown").equals(superuser)) { int access = Nfs3Constant.ACCESS3_LOOKUP | Nfs3Constant.ACCESS3_DELETE | Nfs3Constant.ACCESS3_EXECUTE | 
Nfs3Constant.ACCESS3_EXTEND | Nfs3Constant.ACCESS3_MODIFY | Nfs3Constant.ACCESS3_READ; return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access); } int access = Nfs3Utils.getAccessRightsForUserGroup( securityHandler.getUid(), securityHandler.getGid(), securityHandler.getAuxGids(), attrs); return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access); } catch (RemoteException r) { LOG.warn("Exception ", r); IOException io = r.unwrapRemoteException(); /** * AuthorizationException can be thrown if the user can't be proxy'ed. */ if (io instanceof AuthorizationException) { return new ACCESS3Response(Nfs3Status.NFS3ERR_ACCES); } else { return new ACCESS3Response(Nfs3Status.NFS3ERR_IO); } } catch (IOException e) { LOG.warn("Exception ", e); int status = mapErrorStatus(e); return new ACCESS3Response(status); } } @Override public READLINK3Response readlink(XDR xdr, RpcInfo info) { return readlink(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting READLINK3Response readlink(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { READLINK3Response response = new READLINK3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } READLINK3Request request; try { request = READLINK3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid READLINK request"); return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle handle = request.getHandle(); if (LOG.isDebugEnabled()) { LOG.debug("NFS READLINK fileId: " + handle.getFileId() + " client: " + remoteAddress); } String fileIdPath = Nfs3Utils.getFileIdPath(handle); try { String target = dfsClient.getLinkTarget(fileIdPath); Nfs3FileAttributes postOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); if (postOpAttr == null) { LOG.info("Can't get path for fileId: " + handle.getFileId()); return new READLINK3Response(Nfs3Status.NFS3ERR_STALE); } if (postOpAttr.getType() != NfsFileType.NFSLNK.toValue()) { LOG.error("Not a symlink, fileId: " + handle.getFileId()); return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL); } if (target == null) { LOG.error("Symlink target should not be null, fileId: " + handle.getFileId()); return new READLINK3Response(Nfs3Status.NFS3ERR_SERVERFAULT); } int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY, NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT); if (rtmax < target.getBytes(Charset.forName("UTF-8")).length) { LOG.error("Link size: " + target.getBytes(Charset.forName("UTF-8")).length + " is larger than max transfer size: " + rtmax); return new READLINK3Response(Nfs3Status.NFS3ERR_IO, postOpAttr, new byte[0]); } return new READLINK3Response(Nfs3Status.NFS3_OK, postOpAttr, target.getBytes(Charset.forName("UTF-8"))); } catch (IOException e) { LOG.warn("Readlink error: " + e.getClass(), e); int status = mapErrorStatus(e); return new READLINK3Response(status); } } @Override public READ3Response read(XDR xdr, RpcInfo info) { return read(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting READ3Response read(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { READ3Response response = new READ3Response(Nfs3Status.NFS3_OK); final String userName = securityHandler.getUser(); if (!checkAccessPrivilege(remoteAddress, 
AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } DFSClient dfsClient = clientCache.getDfsClient(userName); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } READ3Request request; try { request = READ3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid READ request"); return new READ3Response(Nfs3Status.NFS3ERR_INVAL); } long offset = request.getOffset(); int count = request.getCount(); FileHandle handle = request.getHandle(); if (LOG.isDebugEnabled()) { LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset + " count: " + count + " client: " + remoteAddress); } Nfs3FileAttributes attrs; boolean eof; if (count == 0) { // Only do access check. try { // Don't read from cache. Client may not have read permission. attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle), iug); } catch (IOException e) { if (LOG.isDebugEnabled()) { LOG.debug("Get error accessing file, fileId: " + handle.getFileId(), e); } return new READ3Response(Nfs3Status.NFS3ERR_IO); } if (attrs == null) { if (LOG.isDebugEnabled()) { LOG.debug("Can't get path for fileId: " + handle.getFileId()); } return new READ3Response(Nfs3Status.NFS3ERR_NOENT); } int access = Nfs3Utils.getAccessRightsForUserGroup( securityHandler.getUid(), securityHandler.getGid(), securityHandler.getAuxGids(), attrs); if ((access & Nfs3Constant.ACCESS3_READ) != 0) { eof = offset >= attrs.getSize(); return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof, ByteBuffer.wrap(new byte[0])); } else { return new READ3Response(Nfs3Status.NFS3ERR_ACCES); } } // In case there is buffered data for the same file, flush it. This can be // optimized later by reading from the cache. int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count); if (ret != Nfs3Status.NFS3_OK) { LOG.warn("commitBeforeRead didn't succeed with ret=" + ret + ". Read may not get most recent data."); } try { int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY, NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT); int buffSize = Math.min(rtmax, count); byte[] readbuffer = new byte[buffSize]; int readCount = 0; /** * Retry exactly once because the DFSInputStream can be stale. */ for (int i = 0; i < 1; ++i) { FSDataInputStream fis = clientCache.getDfsInputStream(userName, Nfs3Utils.getFileIdPath(handle)); if (fis == null) { return new READ3Response(Nfs3Status.NFS3ERR_ACCES); } try { readCount = fis.read(offset, readbuffer, 0, count); metrics.incrBytesRead(readCount); } catch (IOException e) { // TODO: A cleaner way is to throw a new type of exception // which requires incompatible changes. if (e.getMessage().equals("Stream closed")) { clientCache.invalidateDfsInputStream(userName, Nfs3Utils.getFileIdPath(handle)); continue; } else { throw e; } } } attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle), iug); if (readCount < count) { LOG.info("Partial read. Asked offset: " + offset + " count: " + count + " and read back: " + readCount + " file size: " + attrs.getSize()); } // HDFS returns -1 for read beyond file size.
if (readCount < 0) { readCount = 0; } eof = (offset + readCount) >= attrs.getSize(); return new READ3Response(Nfs3Status.NFS3_OK, attrs, readCount, eof, ByteBuffer.wrap(readbuffer)); } catch (IOException e) { LOG.warn("Read error: " + e.getClass() + " offset: " + offset + " count: " + count, e); int status = mapErrorStatus(e); return new READ3Response(status); } } @Override public WRITE3Response write(XDR xdr, RpcInfo info) { SecurityHandler securityHandler = getSecurityHandler(info); RpcCall rpcCall = (RpcCall) info.header(); int xid = rpcCall.getXid(); SocketAddress remoteAddress = info.remoteAddress(); return write(xdr, info.channel(), xid, securityHandler, remoteAddress); } @VisibleForTesting WRITE3Response write(XDR xdr, Channel channel, int xid, SecurityHandler securityHandler, SocketAddress remoteAddress) { WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } WRITE3Request request; try { request = WRITE3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid WRITE request"); return new WRITE3Response(Nfs3Status.NFS3ERR_INVAL); } long offset = request.getOffset(); int count = request.getCount(); WriteStableHow stableHow = request.getStableHow(); byte[] data = request.getData().array(); if (data.length < count) { LOG.error("Invalid argument, data size is less than count in request"); return new WRITE3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle handle = request.getHandle(); if (LOG.isDebugEnabled()) { LOG.debug("NFS WRITE fileId: " + handle.getFileId() + " offset: " + offset + " length: " + count + " stableHow: " + stableHow.getValue() + " xid: " + xid + " client: " + remoteAddress); } Nfs3FileAttributes preOpAttr = null; try { preOpAttr = writeManager.getFileAttr(dfsClient, handle, iug); if (preOpAttr == null) { LOG.error("Can't get path for fileId: " + handle.getFileId()); return new WRITE3Response(Nfs3Status.NFS3ERR_STALE); } if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { return new WRITE3Response(Nfs3Status.NFS3ERR_ACCES, new WccData( Nfs3Utils.getWccAttr(preOpAttr), preOpAttr), 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); } if (LOG.isDebugEnabled()) { LOG.debug("requested offset=" + offset + " and current filesize=" + preOpAttr.getSize()); } writeManager.handleWrite(dfsClient, request, channel, xid, preOpAttr); } catch (IOException e) { LOG.info("Error writing to fileId " + handle.getFileId() + " at offset " + offset + " and length " + data.length, e); // Try to return WccData Nfs3FileAttributes postOpAttr = null; try { postOpAttr = writeManager.getFileAttr(dfsClient, handle, iug); } catch (IOException e1) { LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId(), e1); } WccAttr attr = preOpAttr == null ? 
null : Nfs3Utils.getWccAttr(preOpAttr); WccData fileWcc = new WccData(attr, postOpAttr); int status = mapErrorStatus(e); return new WRITE3Response(status, fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF); } return null; } @Override public CREATE3Response create(XDR xdr, RpcInfo info) { return create(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting CREATE3Response create(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { CREATE3Response response = new CREATE3Response(Nfs3Status.NFS3_OK); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } CREATE3Request request; try { request = CREATE3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid CREATE request"); return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle dirHandle = request.getHandle(); String fileName = request.getName(); if (LOG.isDebugEnabled()) { LOG.debug("NFS CREATE dir fileId: " + dirHandle.getFileId() + " filename: " + fileName + " client: " + remoteAddress); } int createMode = request.getMode(); if ((createMode != Nfs3Constant.CREATE_EXCLUSIVE) && request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE) && request.getObjAttr().getSize() != 0) { LOG.error("Setting file size is not supported when creating file: " + fileName + " dir fileId: " + dirHandle.getFileId()); return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL); } HdfsDataOutputStream fos = null; String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle); Nfs3FileAttributes preOpDirAttr = null; Nfs3FileAttributes postOpObjAttr = null; FileHandle fileHandle = null; WccData dirWcc = null; try { preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); if (preOpDirAttr == null) { LOG.error("Can't get path for dirHandle: " + dirHandle); return new CREATE3Response(Nfs3Status.NFS3ERR_STALE); } if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { return new CREATE3Response(Nfs3Status.NFS3ERR_ACCES, null, preOpDirAttr, new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr)); } String fileIdPath = Nfs3Utils.getFileIdPath(dirHandle) + "/" + fileName; SetAttr3 setAttr3 = request.getObjAttr(); assert (setAttr3 != null); FsPermission permission = setAttr3.getUpdateFields().contains( SetAttrField.MODE) ? new FsPermission((short) setAttr3.getMode()) : FsPermission.getDefault().applyUMask(umask); EnumSet<CreateFlag> flag = (createMode != Nfs3Constant.CREATE_EXCLUSIVE) ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet.of(CreateFlag.CREATE); fos = dfsClient.createWrappedOutputStream( dfsClient.create(fileIdPath, permission, flag, false, replication, blockSize, null, bufferSize, null), null); if ((createMode == Nfs3Constant.CREATE_UNCHECKED) || (createMode == Nfs3Constant.CREATE_GUARDED)) { // Set group if it's not specified in the request. 
if (!setAttr3.getUpdateFields().contains(SetAttrField.GID)) { setAttr3.getUpdateFields().add(SetAttrField.GID); setAttr3.setGid(securityHandler.getGid()); } setattrInternal(dfsClient, fileIdPath, setAttr3, false); } postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug); // Add open stream OpenFileCtx openFileCtx = new OpenFileCtx(fos, postOpObjAttr, writeDumpDir + "/" + postOpObjAttr.getFileId(), dfsClient, iug, aixCompatMode, config); fileHandle = new FileHandle(postOpObjAttr.getFileId()); if (!writeManager.addOpenFileStream(fileHandle, openFileCtx)) { LOG.warn("Can't add more stream, close it." + " Future write will become append"); fos.close(); fos = null; } else { if (LOG.isDebugEnabled()) { LOG.debug("Opened stream for file: " + fileName + ", fileId: " + fileHandle.getFileId()); } } } catch (IOException e) { LOG.error("Exception", e); if (fos != null) { try { fos.close(); } catch (IOException e1) { LOG.error("Can't close stream for dirFileId: " + dirHandle.getFileId() + " filename: " + fileName, e1); } } if (dirWcc == null) { try { dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug); } catch (IOException e1) { LOG.error("Can't get postOpDirAttr for dirFileId: " + dirHandle.getFileId(), e1); } } int status = mapErrorStatus(e); return new CREATE3Response(status, fileHandle, postOpObjAttr, dirWcc); } return new CREATE3Response(Nfs3Status.NFS3_OK, fileHandle, postOpObjAttr, dirWcc); } @Override public MKDIR3Response mkdir(XDR xdr, RpcInfo info) { return mkdir(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting MKDIR3Response mkdir(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { MKDIR3Response response = new MKDIR3Response(Nfs3Status.NFS3_OK); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } MKDIR3Request request; try { request = MKDIR3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid MKDIR request"); return new MKDIR3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle dirHandle = request.getHandle(); String fileName = request.getName(); if (LOG.isDebugEnabled()) { LOG.debug("NFS MKDIR dirId: " + dirHandle.getFileId() + " filename: " + fileName + " client: " + remoteAddress); } if (request.getObjAttr().getUpdateFields().contains(SetAttrField.SIZE)) { LOG.error("Setting file size is not supported when mkdir: " + fileName + " in dirHandle" + dirHandle); return new MKDIR3Response(Nfs3Status.NFS3ERR_INVAL); } String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle); Nfs3FileAttributes preOpDirAttr = null; Nfs3FileAttributes postOpDirAttr = null; Nfs3FileAttributes postOpObjAttr = null; FileHandle objFileHandle = null; try { preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); if (preOpDirAttr == null) { LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId()); return new MKDIR3Response(Nfs3Status.NFS3ERR_STALE); } if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { return new MKDIR3Response(Nfs3Status.NFS3ERR_ACCES, null, preOpDirAttr, new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr)); } final String fileIdPath = dirFileIdPath + "/" + fileName; SetAttr3 setAttr3 = request.getObjAttr(); FsPermission permission = setAttr3.getUpdateFields().contains( SetAttrField.MODE) ? 
new FsPermission((short) setAttr3.getMode()) : FsPermission.getDefault().applyUMask(umask); if (!dfsClient.mkdirs(fileIdPath, permission, false)) { WccData dirWcc = Nfs3Utils.createWccData( Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug); return new MKDIR3Response(Nfs3Status.NFS3ERR_IO, null, null, dirWcc); } // Set group if it's not specified in the request. if (!setAttr3.getUpdateFields().contains(SetAttrField.GID)) { setAttr3.getUpdateFields().add(SetAttrField.GID); setAttr3.setGid(securityHandler.getGid()); } setattrInternal(dfsClient, fileIdPath, setAttr3, false); postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); objFileHandle = new FileHandle(postOpObjAttr.getFileId()); WccData dirWcc = Nfs3Utils.createWccData( Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug); return new MKDIR3Response(Nfs3Status.NFS3_OK, new FileHandle( postOpObjAttr.getFileId()), postOpObjAttr, dirWcc); } catch (IOException e) { LOG.warn("Exception ", e); // Try to return correct WccData if (postOpDirAttr == null) { try { postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); } catch (IOException e1) { LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e); } } WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), postOpDirAttr); int status = mapErrorStatus(e); return new MKDIR3Response(status, objFileHandle, postOpObjAttr, dirWcc); } } @Override public READDIR3Response mknod(XDR xdr, RpcInfo info) { return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP); } @Override public REMOVE3Response remove(XDR xdr, RpcInfo info) { return remove(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } REMOVE3Request request; try { request = REMOVE3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid REMOVE request"); return new REMOVE3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle dirHandle = request.getHandle(); String fileName = request.getName(); if (LOG.isDebugEnabled()) { LOG.debug("NFS REMOVE dir fileId: " + dirHandle.getFileId() + " fileName: " + fileName + " client: " + remoteAddress); } String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle); Nfs3FileAttributes preOpDirAttr = null; Nfs3FileAttributes postOpDirAttr = null; try { preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); if (preOpDirAttr == null) { LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId()); return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE); } WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr); if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, errWcc); } String fileIdPath = dirFileIdPath + "/" + fileName; HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath); if (fstat == null) { return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, errWcc); } if (fstat.isDir()) { return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, errWcc); } boolean result = dfsClient.delete(fileIdPath, false); WccData dirWcc = Nfs3Utils.createWccData( Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug); if (!result) { return new 
REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc); } return new REMOVE3Response(Nfs3Status.NFS3_OK, dirWcc); } catch (IOException e) { LOG.warn("Exception ", e); // Try to return correct WccData if (postOpDirAttr == null) { try { postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); } catch (IOException e1) { LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1); } } WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), postOpDirAttr); int status = mapErrorStatus(e); return new REMOVE3Response(status, dirWcc); } } @Override public RMDIR3Response rmdir(XDR xdr, RpcInfo info) { return rmdir(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting RMDIR3Response rmdir(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } RMDIR3Request request; try { request = RMDIR3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid RMDIR request"); return new RMDIR3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle dirHandle = request.getHandle(); String fileName = request.getName(); if (LOG.isDebugEnabled()) { LOG.debug("NFS RMDIR dir fileId: " + dirHandle.getFileId() + " fileName: " + fileName + " client: " + remoteAddress); } String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle); Nfs3FileAttributes preOpDirAttr = null; Nfs3FileAttributes postOpDirAttr = null; try { preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); if (preOpDirAttr == null) { LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId()); return new RMDIR3Response(Nfs3Status.NFS3ERR_STALE); } WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr); if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, errWcc); } String fileIdPath = dirFileIdPath + "/" + fileName; HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath); if (fstat == null) { return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, errWcc); } if (!fstat.isDir()) { return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, errWcc); } if (fstat.getChildrenNum() > 0) { return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTEMPTY, errWcc); } boolean result = dfsClient.delete(fileIdPath, false); WccData dirWcc = Nfs3Utils.createWccData( Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug); if (!result) { return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc); } return new RMDIR3Response(Nfs3Status.NFS3_OK, dirWcc); } catch (IOException e) { LOG.warn("Exception ", e); // Try to return correct WccData if (postOpDirAttr == null) { try { postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); } catch (IOException e1) { LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1); } } WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), postOpDirAttr); int status = mapErrorStatus(e); return new RMDIR3Response(status, dirWcc); } } @Override public RENAME3Response rename(XDR xdr, RpcInfo info) { return rename(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting RENAME3Response rename(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { RENAME3Response response = new RENAME3Response(Nfs3Status.NFS3_OK); DFSClient dfsClient = 
clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } RENAME3Request request = null; try { request = RENAME3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid RENAME request"); return new RENAME3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle fromHandle = request.getFromDirHandle(); String fromName = request.getFromName(); FileHandle toHandle = request.getToDirHandle(); String toName = request.getToName(); if (LOG.isDebugEnabled()) { LOG.debug("NFS RENAME from: " + fromHandle.getFileId() + "/" + fromName + " to: " + toHandle.getFileId() + "/" + toName + " client: " + remoteAddress); } String fromDirFileIdPath = Nfs3Utils.getFileIdPath(fromHandle); String toDirFileIdPath = Nfs3Utils.getFileIdPath(toHandle); Nfs3FileAttributes fromPreOpAttr = null; Nfs3FileAttributes toPreOpAttr = null; WccData fromDirWcc = null; WccData toDirWcc = null; try { fromPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, fromDirFileIdPath, iug); if (fromPreOpAttr == null) { LOG.info("Can't get path for fromHandle fileId: " + fromHandle.getFileId()); return new RENAME3Response(Nfs3Status.NFS3ERR_STALE); } toPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, toDirFileIdPath, iug); if (toPreOpAttr == null) { LOG.info("Can't get path for toHandle fileId: " + toHandle.getFileId()); return new RENAME3Response(Nfs3Status.NFS3ERR_STALE); } if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { WccData fromWcc = new WccData(Nfs3Utils.getWccAttr(fromPreOpAttr), fromPreOpAttr); WccData toWcc = new WccData(Nfs3Utils.getWccAttr(toPreOpAttr), toPreOpAttr); return new RENAME3Response(Nfs3Status.NFS3ERR_ACCES, fromWcc, toWcc); } String src = fromDirFileIdPath + "/" + fromName; String dst = toDirFileIdPath + "/" + toName; dfsClient.rename(src, dst, Options.Rename.NONE); // Assemble the reply fromDirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(fromPreOpAttr), dfsClient, fromDirFileIdPath, iug); toDirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(toPreOpAttr), dfsClient, toDirFileIdPath, iug); return new RENAME3Response(Nfs3Status.NFS3_OK, fromDirWcc, toDirWcc); } catch (IOException e) { LOG.warn("Exception ", e); // Try to return correct WccData try { fromDirWcc = Nfs3Utils.createWccData( Nfs3Utils.getWccAttr(fromPreOpAttr), dfsClient, fromDirFileIdPath, iug); toDirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(toPreOpAttr), dfsClient, toDirFileIdPath, iug); } catch (IOException e1) { LOG.info("Can't get postOpDirAttr for " + fromDirFileIdPath + " or" + toDirFileIdPath, e1); } int status = mapErrorStatus(e); return new RENAME3Response(status, fromDirWcc, toDirWcc); } } @Override public SYMLINK3Response symlink(XDR xdr, RpcInfo info) { return symlink(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting SYMLINK3Response symlink(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { SYMLINK3Response response = new SYMLINK3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } SYMLINK3Request request; try { request = SYMLINK3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid SYMLINK request"); response.setStatus(Nfs3Status.NFS3ERR_INVAL); return response; } FileHandle 
dirHandle = request.getHandle(); String name = request.getName(); String symData = request.getSymData(); String linkDirIdPath = Nfs3Utils.getFileIdPath(dirHandle); // Don't do any name check to source path, just leave it to HDFS String linkIdPath = linkDirIdPath + "/" + name; if (LOG.isDebugEnabled()) { LOG.debug("NFS SYMLINK, target: " + symData + " link: " + linkIdPath + " client: " + remoteAddress); } try { WccData dirWcc = response.getDirWcc(); WccAttr preOpAttr = Nfs3Utils.getWccAttr(dfsClient, linkDirIdPath); dirWcc.setPreOpAttr(preOpAttr); dfsClient.createSymlink(symData, linkIdPath, false); // Set symlink attr is considered as to change the attr of the target // file. So no need to set symlink attr here after it's created. HdfsFileStatus linkstat = dfsClient.getFileLinkInfo(linkIdPath); Nfs3FileAttributes objAttr = Nfs3Utils.getNfs3FileAttrFromFileStatus( linkstat, iug); dirWcc .setPostOpAttr(Nfs3Utils.getFileAttr(dfsClient, linkDirIdPath, iug)); return new SYMLINK3Response(Nfs3Status.NFS3_OK, new FileHandle( objAttr.getFileId()), objAttr, dirWcc); } catch (IOException e) { LOG.warn("Exception: " + e); int status = mapErrorStatus(e); response.setStatus(status); return response; } } @Override public READDIR3Response link(XDR xdr, RpcInfo info) { return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP); } /** * Used by readdir and readdirplus to get dirents. It retries the listing if * the startAfter can't be found anymore. */ private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath, byte[] startAfter) throws IOException { DirectoryListing dlisting; try { dlisting = dfsClient.listPaths(dirFileIdPath, startAfter); } catch (RemoteException e) { IOException io = e.unwrapRemoteException(); if (!(io instanceof DirectoryListingStartAfterNotFoundException)) { throw io; } // This happens when startAfter was just deleted LOG.info("Cookie couldn't be found: " + new String(startAfter, Charset.forName("UTF-8")) + ", do listing from beginning"); dlisting = dfsClient .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME); } return dlisting; } @Override public READDIR3Response readdir(XDR xdr, RpcInfo info) { return readdir(xdr, getSecurityHandler(info), info.remoteAddress()); } public READDIR3Response readdir(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } READDIR3Request request; try { request = READDIR3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid READDIR request"); return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle handle = request.getHandle(); long cookie = request.getCookie(); if (cookie < 0) { LOG.error("Invalid READDIR request, with negative cookie: " + cookie); return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL); } long count = request.getCount(); if (count <= 0) { LOG.info("Nonpositive count in invalid READDIR request: " + count); return new READDIR3Response(Nfs3Status.NFS3_OK); } if (LOG.isDebugEnabled()) { LOG.debug("NFS READDIR fileId: " + handle.getFileId() + " cookie: " + cookie + " count: " + count + " client: " + remoteAddress); } HdfsFileStatus dirStatus; DirectoryListing dlisting; Nfs3FileAttributes postOpAttr; long 
dotdotFileId = 0; try { String dirFileIdPath = Nfs3Utils.getFileIdPath(handle); dirStatus = dfsClient.getFileInfo(dirFileIdPath); if (dirStatus == null) { LOG.info("Can't get path for fileId: " + handle.getFileId()); return new READDIR3Response(Nfs3Status.NFS3ERR_STALE); } if (!dirStatus.isDir()) { LOG.error("Can't readdir for regular file, fileId: " + handle.getFileId()); return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR); } long cookieVerf = request.getCookieVerf(); if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) { if (aixCompatMode) { // The AIX NFS client misinterprets RFC-1813 and will repeatedly send // the same cookieverf value even across VFS-level readdir calls, // instead of getting a new cookieverf for every VFS-level readdir // call, and reusing the cookieverf only in the event that multiple // incremental NFS-level readdir calls must be made to fetch all of // the directory entries. This means that whenever a readdir call is // made by an AIX NFS client for a given directory, and that directory // is subsequently modified, thus changing its mtime, no later readdir // calls will succeed from AIX for that directory until the FS is // unmounted/remounted. See HDFS-6549 for more info. LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " + "mismatches."); } else { LOG.error("CookieVerf mismatch. request cookieVerf: " + cookieVerf + " dir cookieVerf: " + dirStatus.getModificationTime()); return new READDIR3Response( Nfs3Status.NFS3ERR_BAD_COOKIE, Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug)); } } if (cookie == 0) { // Get dotdot fileId String dotdotFileIdPath = dirFileIdPath + "/.."; HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath); if (dotdotStatus == null) { // This should not happen throw new IOException("Can't get path for handle path: " + dotdotFileIdPath); } dotdotFileId = dotdotStatus.getFileId(); } // Get the list from the resume point byte[] startAfter; if(cookie == 0 ) { startAfter = HdfsFileStatus.EMPTY_NAME; } else { String inodeIdPath = Nfs3Utils.getFileIdPath(cookie); startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8")); } dlisting = listPaths(dfsClient, dirFileIdPath, startAfter); postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); if (postOpAttr == null) { LOG.error("Can't get path for fileId: " + handle.getFileId()); return new READDIR3Response(Nfs3Status.NFS3ERR_STALE); } } catch (IOException e) { LOG.warn("Exception ", e); int status = mapErrorStatus(e); return new READDIR3Response(status); } /** * Set up the dirents in the response. fileId is used as the cookie with one * exception. Linux client can either be stuck with "ls" command (on REHL) * or report "Too many levels of symbolic links" (Ubuntu). * * The problem is that, only two items returned, "." and ".." when the * namespace is empty. Both of them are "/" with the same cookie(root * fileId). Linux client doesn't think such a directory is a real directory. * Even though NFS protocol specifies cookie is an opaque data, Linux client * somehow doesn't like an empty dir returns same cookie for both "." and * "..". * * The workaround is to use 0 as the cookie for "." and always return "." as * the first entry in readdir/readdirplus response. 
*/ HdfsFileStatus[] fstatus = dlisting.getPartialListing(); int n = (int) Math.min(fstatus.length, count-2); boolean eof = (n >= fstatus.length) && !dlisting.hasMore(); Entry3[] entries; if (cookie == 0) { entries = new Entry3[n + 2]; entries[0] = new READDIR3Response.Entry3(postOpAttr.getFileId(), ".", 0); entries[1] = new READDIR3Response.Entry3(dotdotFileId, "..", dotdotFileId); for (int i = 2; i < n + 2; i++) { entries[i] = new READDIR3Response.Entry3(fstatus[i - 2].getFileId(), fstatus[i - 2].getLocalName(), fstatus[i - 2].getFileId()); } } else { // Resume from last readdirplus. If the cookie is "..", the result // list is up the directory content since HDFS uses name as resume point. entries = new Entry3[n]; for (int i = 0; i < n; i++) { entries[i] = new READDIR3Response.Entry3(fstatus[i].getFileId(), fstatus[i].getLocalName(), fstatus[i].getFileId()); } } DirList3 dirList = new READDIR3Response.DirList3(entries, eof); return new READDIR3Response(Nfs3Status.NFS3_OK, postOpAttr, dirStatus.getModificationTime(), dirList); } @Override public READDIRPLUS3Response readdirplus(XDR xdr, RpcInfo info) { return readdirplus(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting READDIRPLUS3Response readdirplus(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_ACCES); } DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_SERVERFAULT); } READDIRPLUS3Request request = null; try { request = READDIRPLUS3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid READDIRPLUS request"); return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle handle = request.getHandle(); long cookie = request.getCookie(); if (cookie < 0) { LOG.error("Invalid READDIRPLUS request, with negative cookie: " + cookie); return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL); } long dirCount = request.getDirCount(); if (dirCount <= 0) { LOG.info("Nonpositive dircount in invalid READDIRPLUS request: " + dirCount); return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL); } int maxCount = request.getMaxCount(); if (maxCount <= 0) { LOG.info("Nonpositive maxcount in invalid READDIRPLUS request: " + maxCount); return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL); } if (LOG.isDebugEnabled()) { LOG.debug("NFS READDIRPLUS fileId: " + handle.getFileId() + " cookie: " + cookie + " dirCount: " + dirCount + " maxCount: " + maxCount + " client: " + remoteAddress); } HdfsFileStatus dirStatus; DirectoryListing dlisting; Nfs3FileAttributes postOpDirAttr; long dotdotFileId = 0; HdfsFileStatus dotdotStatus = null; try { String dirFileIdPath = Nfs3Utils.getFileIdPath(handle); dirStatus = dfsClient.getFileInfo(dirFileIdPath); if (dirStatus == null) { LOG.info("Can't get path for fileId: " + handle.getFileId()); return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE); } if (!dirStatus.isDir()) { LOG.error("Can't readdirplus for regular file, fileId: " + handle.getFileId()); return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR); } long cookieVerf = request.getCookieVerf(); if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) { if (aixCompatMode) { // The AIX NFS client misinterprets RFC-1813 and will repeatedly send // the same cookieverf value even across VFS-level readdir calls, // instead of getting a new cookieverf for 
every VFS-level readdir // call. This means that whenever a readdir call is made by an AIX NFS // client for a given directory, and that directory is subsequently // modified, thus changing its mtime, no later readdir calls will // succeed for that directory from AIX until the FS is // unmounted/remounted. See HDFS-6549 for more info. LOG.warn("AIX compatibility mode enabled, ignoring cookieverf " + "mismatches."); } else { LOG.error("cookieverf mismatch. request cookieverf: " + cookieVerf + " dir cookieverf: " + dirStatus.getModificationTime()); return new READDIRPLUS3Response( Nfs3Status.NFS3ERR_BAD_COOKIE, Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug), 0, null); } } if (cookie == 0) { // Get dotdot fileId String dotdotFileIdPath = dirFileIdPath + "/.."; dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath); if (dotdotStatus == null) { // This should not happen throw new IOException("Can't get path for handle path: " + dotdotFileIdPath); } dotdotFileId = dotdotStatus.getFileId(); } // Get the list from the resume point byte[] startAfter; if (cookie == 0) { startAfter = HdfsFileStatus.EMPTY_NAME; } else { String inodeIdPath = Nfs3Utils.getFileIdPath(cookie); startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8")); } dlisting = listPaths(dfsClient, dirFileIdPath, startAfter); postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); if (postOpDirAttr == null) { LOG.info("Can't get path for fileId: " + handle.getFileId()); return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE); } } catch (IOException e) { LOG.warn("Exception ", e); int status = mapErrorStatus(e); return new READDIRPLUS3Response(status); } // Set up the dirents in the response HdfsFileStatus[] fstatus = dlisting.getPartialListing(); int n = (int) Math.min(fstatus.length, dirCount-2); boolean eof = (n >= fstatus.length) && !dlisting.hasMore(); READDIRPLUS3Response.EntryPlus3[] entries; if (cookie == 0) { entries = new READDIRPLUS3Response.EntryPlus3[n+2]; entries[0] = new READDIRPLUS3Response.EntryPlus3( postOpDirAttr.getFileId(), ".", 0, postOpDirAttr, new FileHandle( postOpDirAttr.getFileId())); entries[1] = new READDIRPLUS3Response.EntryPlus3(dotdotFileId, "..", dotdotFileId, Nfs3Utils.getNfs3FileAttrFromFileStatus(dotdotStatus, iug), new FileHandle(dotdotFileId)); for (int i = 2; i < n + 2; i++) { long fileId = fstatus[i - 2].getFileId(); FileHandle childHandle = new FileHandle(fileId); Nfs3FileAttributes attr; try { attr = writeManager.getFileAttr(dfsClient, childHandle, iug); } catch (IOException e) { LOG.error("Can't get file attributes for fileId: " + fileId, e); continue; } entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId, fstatus[i - 2].getLocalName(), fileId, attr, childHandle); } } else { // Resume from last readdirplus. If the cookie is "..", the result // list is up the directory content since HDFS uses name as resume point. 
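// (In this branch the cookie is the file id of the last entry already
// returned; it was converted to an inode path above and passed to
// listPaths() as the startAfter key, so the listing resumes right after it
// and "." / ".." are not emitted again.)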
entries = new READDIRPLUS3Response.EntryPlus3[n]; for (int i = 0; i < n; i++) { long fileId = fstatus[i].getFileId(); FileHandle childHandle = new FileHandle(fileId); Nfs3FileAttributes attr; try { attr = writeManager.getFileAttr(dfsClient, childHandle, iug); } catch (IOException e) { LOG.error("Can't get file attributes for fileId: " + fileId, e); continue; } entries[i] = new READDIRPLUS3Response.EntryPlus3(fileId, fstatus[i].getLocalName(), fileId, attr, childHandle); } } DirListPlus3 dirListPlus = new READDIRPLUS3Response.DirListPlus3(entries, eof); return new READDIRPLUS3Response(Nfs3Status.NFS3_OK, postOpDirAttr, dirStatus.getModificationTime(), dirListPlus); } @Override public FSSTAT3Response fsstat(XDR xdr, RpcInfo info) { return fsstat(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting FSSTAT3Response fsstat(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } FSSTAT3Request request; try { request = FSSTAT3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid FSSTAT request"); return new FSSTAT3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle handle = request.getHandle(); if (LOG.isDebugEnabled()) { LOG.debug("NFS FSSTAT fileId: " + handle.getFileId() + " client: " + remoteAddress); } try { FsStatus fsStatus = dfsClient.getDiskStatus(); long totalBytes = fsStatus.getCapacity(); long freeBytes = fsStatus.getRemaining(); Nfs3FileAttributes attrs = writeManager.getFileAttr(dfsClient, handle, iug); if (attrs == null) { LOG.info("Can't get path for fileId: " + handle.getFileId()); return new FSSTAT3Response(Nfs3Status.NFS3ERR_STALE); } long maxFsObjects = config.getLong("dfs.max.objects", 0); if (maxFsObjects == 0) { // A value of zero in HDFS indicates no limit to the number // of objects that dfs supports. Using Integer.MAX_VALUE instead of // Long.MAX_VALUE so 32bit client won't complain. maxFsObjects = Integer.MAX_VALUE; } return new FSSTAT3Response(Nfs3Status.NFS3_OK, attrs, totalBytes, freeBytes, freeBytes, maxFsObjects, maxFsObjects, maxFsObjects, 0); } catch (RemoteException r) { LOG.warn("Exception ", r); IOException io = r.unwrapRemoteException(); /** * AuthorizationException can be thrown if the user can't be proxy'ed. 
*/ if (io instanceof AuthorizationException) { return new FSSTAT3Response(Nfs3Status.NFS3ERR_ACCES); } else { return new FSSTAT3Response(Nfs3Status.NFS3ERR_IO); } } catch (IOException e) { LOG.warn("Exception ", e); int status = mapErrorStatus(e); return new FSSTAT3Response(status); } } @Override public FSINFO3Response fsinfo(XDR xdr, RpcInfo info) { return fsinfo(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting FSINFO3Response fsinfo(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { FSINFO3Response response = new FSINFO3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } FSINFO3Request request; try { request = FSINFO3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid FSINFO request"); return new FSINFO3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle handle = request.getHandle(); if (LOG.isDebugEnabled()) { LOG.debug("NFS FSINFO fileId: " + handle.getFileId() + " client: " + remoteAddress); } try { int rtmax = config.getInt( NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY, NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT); int wtmax = config.getInt( NfsConfigKeys.DFS_NFS_MAX_WRITE_TRANSFER_SIZE_KEY, NfsConfigKeys.DFS_NFS_MAX_WRITE_TRANSFER_SIZE_DEFAULT); int dtperf = config.getInt( NfsConfigKeys.DFS_NFS_MAX_READDIR_TRANSFER_SIZE_KEY, NfsConfigKeys.DFS_NFS_MAX_READDIR_TRANSFER_SIZE_DEFAULT); Nfs3FileAttributes attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle), iug); if (attrs == null) { LOG.info("Can't get path for fileId: " + handle.getFileId()); return new FSINFO3Response(Nfs3Status.NFS3ERR_STALE); } int fsProperty = Nfs3Constant.FSF3_CANSETTIME | Nfs3Constant.FSF3_HOMOGENEOUS; return new FSINFO3Response(Nfs3Status.NFS3_OK, attrs, rtmax, rtmax, 1, wtmax, wtmax, 1, dtperf, Long.MAX_VALUE, new NfsTime(1), fsProperty); } catch (IOException e) { LOG.warn("Exception ", e); int status = mapErrorStatus(e); return new FSINFO3Response(status); } } @Override public PATHCONF3Response pathconf(XDR xdr, RpcInfo info) { return pathconf(xdr, getSecurityHandler(info), info.remoteAddress()); } @VisibleForTesting PATHCONF3Response pathconf(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) { PATHCONF3Response response = new PATHCONF3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) { response.setStatus(Nfs3Status.NFS3ERR_ACCES); return response; } DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } PATHCONF3Request request; try { request = PATHCONF3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid PATHCONF request"); return new PATHCONF3Response(Nfs3Status.NFS3ERR_INVAL); } FileHandle handle = request.getHandle(); Nfs3FileAttributes attrs; if (LOG.isDebugEnabled()) { LOG.debug("NFS PATHCONF fileId: " + handle.getFileId() + " client: " + remoteAddress); } try { attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle), iug); if (attrs == null) { LOG.info("Can't get path for fileId: " + handle.getFileId()); return new PATHCONF3Response(Nfs3Status.NFS3ERR_STALE); } return new PATHCONF3Response(Nfs3Status.NFS3_OK, 
attrs, 0, HdfsServerConstants.MAX_PATH_LENGTH, true, false, false, true); } catch (IOException e) { LOG.warn("Exception ", e); int status = mapErrorStatus(e); return new PATHCONF3Response(status); } } @Override public COMMIT3Response commit(XDR xdr, RpcInfo info) { SecurityHandler securityHandler = getSecurityHandler(info); RpcCall rpcCall = (RpcCall) info.header(); int xid = rpcCall.getXid(); SocketAddress remoteAddress = info.remoteAddress(); return commit(xdr, info.channel(), xid, securityHandler, remoteAddress); } @VisibleForTesting COMMIT3Response commit(XDR xdr, Channel channel, int xid, SecurityHandler securityHandler, SocketAddress remoteAddress) { COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK); DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; } COMMIT3Request request; try { request = COMMIT3Request.deserialize(xdr); } catch (IOException e) { LOG.error("Invalid COMMIT request"); response.setStatus(Nfs3Status.NFS3ERR_INVAL); return response; } FileHandle handle = request.getHandle(); if (LOG.isDebugEnabled()) { LOG.debug("NFS COMMIT fileId: " + handle.getFileId() + " offset=" + request.getOffset() + " count=" + request.getCount() + " client: " + remoteAddress); } String fileIdPath = Nfs3Utils.getFileIdPath(handle); Nfs3FileAttributes preOpAttr = null; try { preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); if (preOpAttr == null) { LOG.info("Can't get path for fileId: " + handle.getFileId()); return new COMMIT3Response(Nfs3Status.NFS3ERR_STALE); } if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) { return new COMMIT3Response(Nfs3Status.NFS3ERR_ACCES, new WccData( Nfs3Utils.getWccAttr(preOpAttr), preOpAttr), Nfs3Constant.WRITE_COMMIT_VERF); } long commitOffset = (request.getCount() == 0) ? 0 : (request.getOffset() + request.getCount()); // Insert commit as an async request writeManager.handleCommit(dfsClient, handle, commitOffset, channel, xid, preOpAttr); return null; } catch (IOException e) { LOG.warn("Exception ", e); Nfs3FileAttributes postOpAttr = null; try { postOpAttr = writeManager.getFileAttr(dfsClient, handle, iug); } catch (IOException e1) { LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId(), e1); } WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr); int status = mapErrorStatus(e); return new COMMIT3Response(status, fileWcc, Nfs3Constant.WRITE_COMMIT_VERF); } } private SecurityHandler getSecurityHandler(Credentials credentials, Verifier verifier) { if (credentials instanceof CredentialsSys) { return new SysSecurityHandler((CredentialsSys) credentials, iug); } else { // TODO: support GSS and handle other cases return null; } } private SecurityHandler getSecurityHandler(RpcInfo info) { RpcCall rpcCall = (RpcCall) info.header(); return getSecurityHandler(rpcCall.getCredential(), rpcCall.getVerifier()); } @Override public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) { RpcCall rpcCall = (RpcCall) info.header(); final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(rpcCall.getProcedure()); int xid = rpcCall.getXid(); byte[] data = new byte[info.data().readableBytes()]; info.data().readBytes(data); XDR xdr = new XDR(data); XDR out = new XDR(); InetAddress client = ((InetSocketAddress) info.remoteAddress()) .getAddress(); Credentials credentials = rpcCall.getCredential(); // Ignore auth only for NFSPROC3_NULL, especially for Linux clients. 
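// For any other procedure the credential flavor must be AUTH_SYS or
// RPCSEC_GSS; anything else is rejected below with an RPC-level AUTH_ERROR
// denial instead of an NFS3 status.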
if (nfsproc3 != NFSPROC3.NULL) { if (credentials.getFlavor() != AuthFlavor.AUTH_SYS && credentials.getFlavor() != AuthFlavor.RPCSEC_GSS) { LOG.info("Wrong RPC AUTH flavor, " + credentials.getFlavor() + " is not AUTH_SYS or RPCSEC_GSS."); XDR reply = new XDR(); RpcDeniedReply rdr = new RpcDeniedReply(xid, RpcReply.ReplyState.MSG_ACCEPTED, RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone()); rdr.write(reply); ChannelBuffer buf = ChannelBuffers.wrappedBuffer(reply.asReadOnlyWrap() .buffer()); RpcResponse rsp = new RpcResponse(buf, info.remoteAddress()); RpcUtil.sendRpcResponse(ctx, rsp); return; } } if (!isIdempotent(rpcCall)) { RpcCallCache.CacheEntry entry = rpcCallCache.checkOrAddToCache(client, xid); if (entry != null) { // in cache if (entry.isCompleted()) { LOG.info("Sending the cached reply to retransmitted request " + xid); RpcUtil.sendRpcResponse(ctx, entry.getResponse()); return; } else { // else request is in progress LOG.info("Retransmitted request, transaction still in progress " + xid); // Ignore the request and do nothing return; } } } // Since write and commit could be async, they use their own startTime and // only record success requests. final long startTime = System.nanoTime(); NFS3Response response = null; if (nfsproc3 == NFSPROC3.NULL) { response = nullProcedure(); } else if (nfsproc3 == NFSPROC3.GETATTR) { response = getattr(xdr, info); metrics.addGetattr(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.SETATTR) { response = setattr(xdr, info); metrics.addSetattr(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.LOOKUP) { response = lookup(xdr, info); metrics.addLookup(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.ACCESS) { response = access(xdr, info); metrics.addAccess(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.READLINK) { response = readlink(xdr, info); metrics.addReadlink(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.READ) { if (LOG.isDebugEnabled()) { LOG.debug(Nfs3Utils.READ_RPC_START + xid); } response = read(xdr, info); if (LOG.isDebugEnabled() && (nfsproc3 == NFSPROC3.READ)) { LOG.debug(Nfs3Utils.READ_RPC_END + xid); } metrics.addRead(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.WRITE) { if (LOG.isDebugEnabled()) { LOG.debug(Nfs3Utils.WRITE_RPC_START + xid); } response = write(xdr, info); // Write end debug trace is in Nfs3Utils.writeChannel } else if (nfsproc3 == NFSPROC3.CREATE) { response = create(xdr, info); metrics.addCreate(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.MKDIR) { response = mkdir(xdr, info); metrics.addMkdir(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.SYMLINK) { response = symlink(xdr, info); metrics.addSymlink(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.MKNOD) { response = mknod(xdr, info); metrics.addMknod(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.REMOVE) { response = remove(xdr, info); metrics.addRemove(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.RMDIR) { response = rmdir(xdr, info); metrics.addRmdir(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.RENAME) { response = rename(xdr, info); metrics.addRename(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.LINK) { response = link(xdr, info); metrics.addLink(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.READDIR) { response = readdir(xdr, info); 
metrics.addReaddir(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.READDIRPLUS) { response = readdirplus(xdr, info); metrics.addReaddirplus(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.FSSTAT) { response = fsstat(xdr, info); metrics.addFsstat(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.FSINFO) { response = fsinfo(xdr, info); metrics.addFsinfo(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.PATHCONF) { response = pathconf(xdr, info); metrics.addPathconf(Nfs3Utils.getElapsedTime(startTime)); } else if (nfsproc3 == NFSPROC3.COMMIT) { response = commit(xdr, info); } else { // Invalid procedure RpcAcceptedReply.getInstance(xid, RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write( out); } if (response == null) { if (LOG.isDebugEnabled()) { LOG.debug("No sync response, expect an async response for request XID=" + rpcCall.getXid()); } return; } // TODO: currently we just return VerifierNone out = response.serialize(out, xid, new VerifierNone()); ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap() .buffer()); RpcResponse rsp = new RpcResponse(buf, info.remoteAddress()); if (!isIdempotent(rpcCall)) { rpcCallCache.callCompleted(client, xid, rsp); } RpcUtil.sendRpcResponse(ctx, rsp); } @Override protected boolean isIdempotent(RpcCall call) { final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(call.getProcedure()); return nfsproc3 == null || nfsproc3.isIdempotent(); } private boolean checkAccessPrivilege(SocketAddress remoteAddress, final AccessPrivilege expected) { // Port monitoring if (!doPortMonitoring(remoteAddress)) { return false; } // Check export table if (exports == null) { return false; } InetAddress client = ((InetSocketAddress) remoteAddress).getAddress(); AccessPrivilege access = exports.getAccessPrivilege(client); if (access == AccessPrivilege.NONE) { return false; } if (access == AccessPrivilege.READ_ONLY && expected == AccessPrivilege.READ_WRITE) { return false; } return true; } @VisibleForTesting WriteManager getWriteManager() { return this.writeManager; } }
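The export check that closes the class above boils down to a small decision table: no export entry (or NONE) denies everything, READ_ONLY denies only procedures that need READ_WRITE, and READ_WRITE allows both. A minimal standalone sketch of that decision, with an illustrative class name and a local copy of the enum rather than the gateway's own types:

// Illustrative sketch of the privilege decision in checkAccessPrivilege();
// AccessDecisionSketch and its nested enum are stand-ins, not gateway API.
public class AccessDecisionSketch {
  enum AccessPrivilege { NONE, READ_ONLY, READ_WRITE }

  /** True when a client granted 'granted' may run a request needing 'expected'. */
  static boolean allowed(AccessPrivilege granted, AccessPrivilege expected) {
    if (granted == null || granted == AccessPrivilege.NONE) {
      return false;                                   // not exported to this client
    }
    // A read-only export rejects only the procedures that mutate state.
    return !(granted == AccessPrivilege.READ_ONLY
        && expected == AccessPrivilege.READ_WRITE);
  }

  public static void main(String[] args) {
    System.out.println(allowed(AccessPrivilege.READ_ONLY, AccessPrivilege.READ_WRITE)); // false
    System.out.println(allowed(AccessPrivilege.READ_WRITE, AccessPrivilege.READ_WRITE)); // true
  }
}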
87792
36.874461
83
java
hadoop
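The readdir and readdirplus handlers in the file above resume a listing by turning the NFS cookie (a file id) back into an inode path and using it as the startAfter key for listPaths. A short sketch of that conversion; the reserved-inode path shape is assumed from HDFS conventions and the class name is illustrative:

import java.nio.charset.StandardCharsets;

// Sketch of the cookie -> startAfter conversion used when resuming a listing.
// ReaddirCookieSketch is illustrative; the path layout assumes HDFS's
// /.reserved/.inodes/<id> convention.
public class ReaddirCookieSketch {
  static byte[] startAfterForCookie(long cookie) {
    if (cookie == 0) {
      return new byte[0];              // start from the beginning (EMPTY_NAME)
    }
    // A non-zero cookie is the file id of the last entry the client has seen.
    String inodeIdPath = "/.reserved/.inodes/" + cookie;
    return inodeIdPath.getBytes(StandardCharsets.UTF_8);
  }

  public static void main(String[] args) {
    System.out.println(
        new String(startAfterForCookie(16389), StandardCharsets.UTF_8));
  }
}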
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import java.io.IOException; import java.util.EnumSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.nfs.NfsFileType; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response; import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; import org.apache.hadoop.nfs.nfs3.response.WccData; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.security.IdMappingServiceProvider; import org.jboss.netty.channel.Channel; import com.google.common.annotations.VisibleForTesting; /** * Manage the writes and responds asynchronously. */ public class WriteManager { public static final Log LOG = LogFactory.getLog(WriteManager.class); private final NfsConfiguration config; private final IdMappingServiceProvider iug; private AsyncDataService asyncDataService; private boolean asyncDataServiceStarted = false; private final int maxStreams; private final boolean aixCompatMode; /** * The time limit to wait for accumulate reordered sequential writes to the * same file before the write is considered done. 
*/ private long streamTimeout; private final OpenFileCtxCache fileContextCache; static public class MultipleCachedStreamException extends IOException { private static final long serialVersionUID = 1L; public MultipleCachedStreamException(String msg) { super(msg); } } boolean addOpenFileStream(FileHandle h, OpenFileCtx ctx) { return fileContextCache.put(h, ctx); } WriteManager(IdMappingServiceProvider iug, final NfsConfiguration config, boolean aixCompatMode) { this.iug = iug; this.config = config; this.aixCompatMode = aixCompatMode; streamTimeout = config.getLong(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY, NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_DEFAULT); LOG.info("Stream timeout is " + streamTimeout + "ms."); if (streamTimeout < NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT) { LOG.info("Reset stream timeout to minimum value " + NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT + "ms."); streamTimeout = NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT; } maxStreams = config.getInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_DEFAULT); LOG.info("Maximum open streams is "+ maxStreams); this.fileContextCache = new OpenFileCtxCache(config, streamTimeout); } void startAsyncDataService() { if (asyncDataServiceStarted) { return; } fileContextCache.start(); this.asyncDataService = new AsyncDataService(); asyncDataServiceStarted = true; } void shutdownAsyncDataService() { if (!asyncDataServiceStarted) { return; } asyncDataServiceStarted = false; asyncDataService.shutdown(); fileContextCache.shutdown(); } void handleWrite(DFSClient dfsClient, WRITE3Request request, Channel channel, int xid, Nfs3FileAttributes preOpAttr) throws IOException { int count = request.getCount(); byte[] data = request.getData().array(); if (data.length < count) { WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL); Nfs3Utils.writeChannel(channel, response.serialize( new XDR(), xid, new VerifierNone()), xid); return; } FileHandle handle = request.getHandle(); if (LOG.isDebugEnabled()) { LOG.debug("handleWrite " + request); } // Check if there is a stream to write FileHandle fileHandle = request.getHandle(); OpenFileCtx openFileCtx = fileContextCache.get(fileHandle); if (openFileCtx == null) { LOG.info("No opened stream for fileId: " + fileHandle.getFileId()); String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle.getFileId()); HdfsDataOutputStream fos = null; Nfs3FileAttributes latestAttr = null; try { int bufferSize = config.getInt( CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT); fos = dfsClient.append(fileIdPath, bufferSize, EnumSet.of(CreateFlag.APPEND), null, null); latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); } catch (RemoteException e) { IOException io = e.unwrapRemoteException(); if (io instanceof AlreadyBeingCreatedException) { LOG.warn("Can't append file: " + fileIdPath + ". Possibly the file is being closed. 
Drop the request: " + request + ", wait for the client to retry..."); return; } throw e; } catch (IOException e) { LOG.error("Can't append to file: " + fileIdPath, e); if (fos != null) { fos.close(); } WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), preOpAttr); WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO, fileWcc, count, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF); Nfs3Utils.writeChannel(channel, response.serialize( new XDR(), xid, new VerifierNone()), xid); return; } // Add open stream String writeDumpDir = config.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY, NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT); openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/" + fileHandle.getFileId(), dfsClient, iug, aixCompatMode, config); if (!addOpenFileStream(fileHandle, openFileCtx)) { LOG.info("Can't add new stream. Close it. Tell client to retry."); try { fos.close(); } catch (IOException e) { LOG.error("Can't close stream for fileId: " + handle.getFileId(), e); } // Notify client to retry WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr); WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_JUKEBOX, fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF); Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid); return; } if (LOG.isDebugEnabled()) { LOG.debug("Opened stream for appending file: " + fileHandle.getFileId()); } } // Add write into the async job queue openFileCtx.receivedNewWrite(dfsClient, request, channel, xid, asyncDataService, iug); return; } // Do a possible commit before read request in case there is buffered data // inside DFSClient which has been flushed but not synced. int commitBeforeRead(DFSClient dfsClient, FileHandle fileHandle, long commitOffset) { int status; OpenFileCtx openFileCtx = fileContextCache.get(fileHandle); if (openFileCtx == null) { if (LOG.isDebugEnabled()) { LOG.debug("No opened stream for fileId: " + fileHandle.getFileId() + " commitOffset=" + commitOffset + ". Return success in this case."); } status = Nfs3Status.NFS3_OK; } else { // commit request triggered by read won't create pending comment obj COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset, null, 0, null, true); switch (ret) { case COMMIT_FINISHED: case COMMIT_INACTIVE_CTX: status = Nfs3Status.NFS3_OK; break; case COMMIT_INACTIVE_WITH_PENDING_WRITE: case COMMIT_ERROR: status = Nfs3Status.NFS3ERR_IO; break; case COMMIT_WAIT: case COMMIT_SPECIAL_WAIT: /** * This should happen rarely in some possible cases, such as read * request arrives before DFSClient is able to quickly flush data to DN, * or Prerequisite writes is not available. Won't wait since we don't * want to block read. */ status = Nfs3Status.NFS3ERR_JUKEBOX; break; case COMMIT_SPECIAL_SUCCESS: // Read beyond eof could result in partial read status = Nfs3Status.NFS3_OK; break; default: LOG.error("Should not get commit return code: " + ret.name()); throw new RuntimeException("Should not get commit return code: " + ret.name()); } } return status; } void handleCommit(DFSClient dfsClient, FileHandle fileHandle, long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) { long startTime = System.nanoTime(); int status; OpenFileCtx openFileCtx = fileContextCache.get(fileHandle); if (openFileCtx == null) { LOG.info("No opened stream for fileId: " + fileHandle.getFileId() + " commitOffset=" + commitOffset + ". 
Return success in this case."); status = Nfs3Status.NFS3_OK; } else { COMMIT_STATUS ret = openFileCtx.checkCommit(dfsClient, commitOffset, channel, xid, preOpAttr, false); switch (ret) { case COMMIT_FINISHED: case COMMIT_INACTIVE_CTX: status = Nfs3Status.NFS3_OK; break; case COMMIT_INACTIVE_WITH_PENDING_WRITE: case COMMIT_ERROR: status = Nfs3Status.NFS3ERR_IO; break; case COMMIT_WAIT: // Do nothing. Commit is async now. return; case COMMIT_SPECIAL_WAIT: status = Nfs3Status.NFS3ERR_JUKEBOX; break; case COMMIT_SPECIAL_SUCCESS: status = Nfs3Status.NFS3_OK; break; default: LOG.error("Should not get commit return code: " + ret.name()); throw new RuntimeException("Should not get commit return code: " + ret.name()); } } // Send out the response Nfs3FileAttributes postOpAttr = null; try { postOpAttr = getFileAttr(dfsClient, new FileHandle(preOpAttr.getFileId()), iug); } catch (IOException e1) { LOG.info("Can't get postOpAttr for fileId: " + preOpAttr.getFileId(), e1); } WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr); COMMIT3Response response = new COMMIT3Response(status, fileWcc, Nfs3Constant.WRITE_COMMIT_VERF); RpcProgramNfs3.metrics.addCommit(Nfs3Utils.getElapsedTime(startTime)); Nfs3Utils.writeChannelCommit(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid); } /** * If the file is in cache, update the size based on the cached data size */ Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle fileHandle, IdMappingServiceProvider iug) throws IOException { String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle); Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug); if (attr != null) { OpenFileCtx openFileCtx = fileContextCache.get(fileHandle); if (openFileCtx != null) { attr.setSize(openFileCtx.getNextOffset()); attr.setUsed(openFileCtx.getNextOffset()); } } return attr; } Nfs3FileAttributes getFileAttr(DFSClient client, FileHandle dirHandle, String fileName) throws IOException { String fileIdPath = Nfs3Utils.getFileIdPath(dirHandle) + "/" + fileName; Nfs3FileAttributes attr = Nfs3Utils.getFileAttr(client, fileIdPath, iug); if ((attr != null) && (attr.getType() == NfsFileType.NFSREG.toValue())) { OpenFileCtx openFileCtx = fileContextCache.get(new FileHandle(attr .getFileId())); if (openFileCtx != null) { attr.setSize(openFileCtx.getNextOffset()); attr.setUsed(openFileCtx.getNextOffset()); } } return attr; } @VisibleForTesting OpenFileCtxCache getOpenFileCtxCache() { return this.fileContextCache; } }
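handleCommit and commitBeforeRead above both fold the commit-check result into an NFS3 status, deferring the reply when the commit is queued asynchronously. A condensed sketch of that mapping; CommitStatus is a pared-down stand-in for OpenFileCtx.COMMIT_STATUS and the numeric codes follow RFC 1813:

// Condensed mapping from commit-check results to NFS3 status codes, as used in
// WriteManager.handleCommit(); CommitStatus is a simplified stand-in enum.
public class CommitStatusSketch {
  enum CommitStatus {
    COMMIT_FINISHED, COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE,
    COMMIT_ERROR, COMMIT_WAIT, COMMIT_SPECIAL_WAIT, COMMIT_SPECIAL_SUCCESS
  }

  static final int NFS3_OK = 0;
  static final int NFS3ERR_IO = 5;
  static final int NFS3ERR_JUKEBOX = 10008;   // tells the client to retry later

  /** Returns the status to send now, or -1 when the reply is deferred. */
  static int toNfs3Status(CommitStatus s) {
    switch (s) {
      case COMMIT_FINISHED:
      case COMMIT_INACTIVE_CTX:
      case COMMIT_SPECIAL_SUCCESS:
        return NFS3_OK;
      case COMMIT_INACTIVE_WITH_PENDING_WRITE:
      case COMMIT_ERROR:
        return NFS3ERR_IO;
      case COMMIT_SPECIAL_WAIT:
        return NFS3ERR_JUKEBOX;
      case COMMIT_WAIT:
      default:
        return -1;                            // response is sent asynchronously
    }
  }
}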
13341
36.268156
86
java
hadoop
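Because WriteManager keeps pending writes in an open-file context before they reach HDFS, the size reported by the NameNode can lag behind what the client has already written; getFileAttr above therefore overwrites size and used with the cached next offset. A stripped-down sketch of that override, using simplified stand-ins for Nfs3FileAttributes and OpenFileCtx:

// Sketch of the size override in WriteManager.getFileAttr(): with an open
// write context, the client-visible size is the next write offset rather than
// whatever the NameNode currently reports. Both classes are stand-ins.
public class CachedSizeSketch {
  static class Attr { long size; long used; }
  static class OpenCtx {
    final long nextOffset;
    OpenCtx(long nextOffset) { this.nextOffset = nextOffset; }
  }

  static Attr overrideWithCachedSize(Attr fromNameNode, OpenCtx ctx) {
    if (fromNameNode != null && ctx != null) {
      fromNameNode.size = ctx.nextOffset;   // bytes accepted from the client so far
      fromNameNode.used = ctx.nextOffset;   // keep "used" consistent with size
    }
    return fromNameNode;
  }

  public static void main(String[] args) {
    Attr a = new Attr();                     // NameNode still reports size 0
    System.out.println(overrideWithCachedSize(a, new OpenCtx(4096)).size); // 4096
  }
}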
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.ClosedChannelException; import java.util.EnumSet; import java.util.Iterator; import java.util.Map.Entry; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.nfs.nfs3.WriteCtx.DataState; import org.apache.hadoop.io.BytesWritable.Comparator; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response; import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; import org.apache.hadoop.nfs.nfs3.response.WccAttr; import org.apache.hadoop.nfs.nfs3.response.WccData; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.security.VerifierNone; import org.apache.hadoop.security.IdMappingServiceProvider; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; import org.jboss.netty.channel.Channel; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; /** * OpenFileCtx saves the context of one HDFS file output stream. Access to it is * synchronized by its member lock. */ class OpenFileCtx { public static final Log LOG = LogFactory.getLog(OpenFileCtx.class); // Pending writes water mark for dump, 1MB private static long DUMP_WRITE_WATER_MARK = 1024 * 1024; static enum COMMIT_STATUS { COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, COMMIT_DO_SYNC, /** * Deferred COMMIT response could fail file uploading. The following two * status are introduced as a solution. 1. if client asks to commit * non-sequential trunk of data, NFS gateway return success with the hope * that client will send the prerequisite writes. 2. 
if client asks to * commit a sequential trunk(means it can be flushed to HDFS), NFS gateway * return a special error NFS3ERR_JUKEBOX indicating the client needs to * retry. Meanwhile, NFS gateway keeps flush data to HDFS and do sync * eventually. * * The reason to let client wait is that, we want the client to wait for the * last commit. Otherwise, client thinks file upload finished (e.g., cp * command returns success) but NFS could be still flushing staged data to * HDFS. However, we don't know which one is the last commit. We make the * assumption that a commit after sequential writes may be the last. * Referring HDFS-7259 for more details. * */ COMMIT_SPECIAL_WAIT, // scoped pending writes is sequential COMMIT_SPECIAL_SUCCESS;// scoped pending writes is not sequential } private final DFSClient client; private final IdMappingServiceProvider iug; // The stream status. False means the stream is closed. private volatile boolean activeState; // The stream write-back status. True means one thread is doing write back. private volatile boolean asyncStatus; private volatile long asyncWriteBackStartOffset; /** * The current offset of the file in HDFS. All the content before this offset * has been written back to HDFS. */ private AtomicLong nextOffset; private final HdfsDataOutputStream fos; private final boolean aixCompatMode; // It's updated after each sync to HDFS private Nfs3FileAttributes latestAttr; private final ConcurrentNavigableMap<OffsetRange, WriteCtx> pendingWrites; private final ConcurrentNavigableMap<Long, CommitCtx> pendingCommits; static class CommitCtx { private final long offset; private final Channel channel; private final int xid; private final Nfs3FileAttributes preOpAttr; public final long startTime; long getOffset() { return offset; } Channel getChannel() { return channel; } int getXid() { return xid; } Nfs3FileAttributes getPreOpAttr() { return preOpAttr; } long getStartTime() { return startTime; } CommitCtx(long offset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) { this.offset = offset; this.channel = channel; this.xid = xid; this.preOpAttr = preOpAttr; this.startTime = System.nanoTime(); } @Override public String toString() { return String.format("offset: %d xid: %d startTime: %d", offset, xid, startTime); } } // The last write, commit request or write-back event. Updating time to keep // output steam alive. 
private long lastAccessTime; private volatile boolean enabledDump; private FileOutputStream dumpOut; /** Tracks the data buffered in memory related to non sequential writes */ private AtomicLong nonSequentialWriteInMemory; private RandomAccessFile raf; private final String dumpFilePath; private Daemon dumpThread; private final boolean uploadLargeFile; private void updateLastAccessTime() { lastAccessTime = Time.monotonicNow(); } private boolean checkStreamTimeout(long streamTimeout) { return Time.monotonicNow() - lastAccessTime > streamTimeout; } long getLastAccessTime() { return lastAccessTime; } public long getNextOffset() { return nextOffset.get(); } boolean getActiveState() { return this.activeState; } boolean hasPendingWork() { return (pendingWrites.size() != 0 || pendingCommits.size() != 0); } /** Increase or decrease the memory occupation of non-sequential writes */ private long updateNonSequentialWriteInMemory(long count) { long newValue = nonSequentialWriteInMemory.addAndGet(count); if (LOG.isDebugEnabled()) { LOG.debug("Update nonSequentialWriteInMemory by " + count + " new value: " + newValue); } Preconditions.checkState(newValue >= 0, "nonSequentialWriteInMemory is negative " + newValue + " after update with count " + count); return newValue; } OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr, String dumpFilePath, DFSClient client, IdMappingServiceProvider iug) { this(fos, latestAttr, dumpFilePath, client, iug, false, new NfsConfiguration()); } OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr, String dumpFilePath, DFSClient client, IdMappingServiceProvider iug, boolean aixCompatMode, NfsConfiguration config) { this.fos = fos; this.latestAttr = latestAttr; this.aixCompatMode = aixCompatMode; // We use the ReverseComparatorOnMin as the comparator of the map. In this // way, we first dump the data with larger offset. In the meanwhile, we // retrieve the last element to write back to HDFS. pendingWrites = new ConcurrentSkipListMap<OffsetRange, WriteCtx>( OffsetRange.ReverseComparatorOnMin); pendingCommits = new ConcurrentSkipListMap<Long, CommitCtx>(); updateLastAccessTime(); activeState = true; asyncStatus = false; asyncWriteBackStartOffset = 0; dumpOut = null; raf = null; nonSequentialWriteInMemory = new AtomicLong(0); this.dumpFilePath = dumpFilePath; enabledDump = dumpFilePath != null; nextOffset = new AtomicLong(); nextOffset.set(latestAttr.getSize()); try { assert(nextOffset.get() == this.fos.getPos()); } catch (IOException e) {} dumpThread = null; this.client = client; this.iug = iug; this.uploadLargeFile = config.getBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, NfsConfigKeys.LARGE_FILE_UPLOAD_DEFAULT); } public Nfs3FileAttributes getLatestAttr() { return latestAttr; } // Get flushed offset. Note that flushed data may not be persisted. 
private long getFlushedOffset() throws IOException { return fos.getPos(); } // Check if need to dump the new writes private void waitForDump() { if (!enabledDump) { if (LOG.isDebugEnabled()) { LOG.debug("Do nothing, dump is disabled."); } return; } if (nonSequentialWriteInMemory.get() < DUMP_WRITE_WATER_MARK) { return; } // wake up the dumper thread to dump the data synchronized (this) { if (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) { if (LOG.isDebugEnabled()) { LOG.debug("Asking dumper to dump..."); } if (dumpThread == null) { dumpThread = new Daemon(new Dumper()); dumpThread.start(); } else { this.notifyAll(); } } while (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) { try { this.wait(); } catch (InterruptedException ignored) { } } } } class Dumper implements Runnable { /** Dump data into a file */ private void dump() { // Create dump outputstream for the first time if (dumpOut == null) { LOG.info("Create dump file: " + dumpFilePath); File dumpFile = new File(dumpFilePath); try { synchronized (this) { // check if alive again Preconditions.checkState(dumpFile.createNewFile(), "The dump file should not exist: %s", dumpFilePath); dumpOut = new FileOutputStream(dumpFile); } } catch (IOException e) { LOG.error("Got failure when creating dump stream " + dumpFilePath, e); enabledDump = false; if (dumpOut != null) { try { dumpOut.close(); } catch (IOException e1) { LOG.error("Can't close dump stream " + dumpFilePath, e); } } return; } } // Get raf for the first dump if (raf == null) { try { raf = new RandomAccessFile(dumpFilePath, "r"); } catch (FileNotFoundException e) { LOG.error("Can't get random access to file " + dumpFilePath); // Disable dump enabledDump = false; return; } } if (LOG.isDebugEnabled()) { LOG.debug("Start dump. Before dump, nonSequentialWriteInMemory == " + nonSequentialWriteInMemory.get()); } Iterator<OffsetRange> it = pendingWrites.keySet().iterator(); while (activeState && it.hasNext() && nonSequentialWriteInMemory.get() > 0) { OffsetRange key = it.next(); WriteCtx writeCtx = pendingWrites.get(key); if (writeCtx == null) { // This write was just deleted continue; } try { long dumpedDataSize = writeCtx.dumpData(dumpOut, raf); if (dumpedDataSize > 0) { updateNonSequentialWriteInMemory(-dumpedDataSize); } } catch (IOException e) { LOG.error("Dump data failed: " + writeCtx + " with error: " + e + " OpenFileCtx state: " + activeState); // Disable dump enabledDump = false; return; } } if (LOG.isDebugEnabled()) { LOG.debug("After dump, nonSequentialWriteInMemory == " + nonSequentialWriteInMemory.get()); } } @Override public void run() { while (activeState && enabledDump) { try { if (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) { dump(); } synchronized (OpenFileCtx.this) { if (nonSequentialWriteInMemory.get() < DUMP_WRITE_WATER_MARK) { OpenFileCtx.this.notifyAll(); try { OpenFileCtx.this.wait(); if (LOG.isDebugEnabled()) { LOG.debug("Dumper woke up"); } } catch (InterruptedException e) { LOG.info("Dumper is interrupted, dumpFilePath= " + OpenFileCtx.this.dumpFilePath); } } } if (LOG.isDebugEnabled()) { LOG.debug("Dumper checking OpenFileCtx activeState: " + activeState + " enabledDump: " + enabledDump); } } catch (Throwable t) { // unblock threads with new request synchronized (OpenFileCtx.this) { OpenFileCtx.this.notifyAll(); } LOG.info("Dumper get Throwable: " + t + ". 
dumpFilePath: " + OpenFileCtx.this.dumpFilePath, t); activeState = false; } } } } private WriteCtx checkRepeatedWriteRequest(WRITE3Request request, Channel channel, int xid) { OffsetRange range = new OffsetRange(request.getOffset(), request.getOffset() + request.getCount()); WriteCtx writeCtx = pendingWrites.get(range); if (writeCtx== null) { return null; } else { if (xid != writeCtx.getXid()) { LOG.warn("Got a repeated request, same range, with a different xid: " + xid + " xid in old request: " + writeCtx.getXid()); //TODO: better handling. } return writeCtx; } } public void receivedNewWrite(DFSClient dfsClient, WRITE3Request request, Channel channel, int xid, AsyncDataService asyncDataService, IdMappingServiceProvider iug) { if (!activeState) { LOG.info("OpenFileCtx is inactive, fileId: " + request.getHandle().getFileId()); WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr); WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO, fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF); Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid); } else { // Update the write time first updateLastAccessTime(); // Handle repeated write requests (same xid or not). // If already replied, send reply again. If not replied, drop the // repeated request. WriteCtx existantWriteCtx = checkRepeatedWriteRequest(request, channel, xid); if (existantWriteCtx != null) { if (!existantWriteCtx.getReplied()) { if (LOG.isDebugEnabled()) { LOG.debug("Repeated write request which hasn't been served: xid=" + xid + ", drop it."); } } else { if (LOG.isDebugEnabled()) { LOG.debug("Repeated write request which is already served: xid=" + xid + ", resend response."); } WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr); WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc, request.getCount(), request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF); Nfs3Utils.writeChannel(channel, response.serialize( new XDR(), xid, new VerifierNone()), xid); } } else { // not a repeated write request receivedNewWriteInternal(dfsClient, request, channel, xid, asyncDataService, iug); } } } @VisibleForTesting public static void alterWriteRequest(WRITE3Request request, long cachedOffset) { long offset = request.getOffset(); int count = request.getCount(); long smallerCount = offset + count - cachedOffset; if (LOG.isDebugEnabled()) { LOG.debug(String.format("Got overwrite with appended data (%d-%d)," + " current offset %d," + " drop the overlapped section (%d-%d)" + " and append new data (%d-%d).", offset, (offset + count - 1), cachedOffset, offset, (cachedOffset - 1), cachedOffset, (offset + count - 1))); } ByteBuffer data = request.getData(); Preconditions.checkState(data.position() == 0, "The write request data has non-zero position"); data.position((int) (cachedOffset - offset)); Preconditions.checkState(data.limit() - data.position() == smallerCount, "The write request buffer has wrong limit/position regarding count"); request.setOffset(cachedOffset); request.setCount((int) smallerCount); } /** * Creates and adds a WriteCtx into the pendingWrites map. This is a * synchronized method to handle concurrent writes. * * @return A non-null {@link WriteCtx} instance if the incoming write * request's offset >= nextOffset. Otherwise null. 
*/ private synchronized WriteCtx addWritesToCache(WRITE3Request request, Channel channel, int xid) { long offset = request.getOffset(); int count = request.getCount(); long cachedOffset = nextOffset.get(); int originalCount = WriteCtx.INVALID_ORIGINAL_COUNT; if (LOG.isDebugEnabled()) { LOG.debug("requested offset=" + offset + " and current offset=" + cachedOffset); } // Handle a special case first if ((offset < cachedOffset) && (offset + count > cachedOffset)) { // One Linux client behavior: after a file is closed and reopened to // write, the client sometimes combines previous written data(could still // be in kernel buffer) with newly appended data in one write. This is // usually the first write after file reopened. In this // case, we log the event and drop the overlapped section. LOG.warn(String.format("Got overwrite with appended data (%d-%d)," + " current offset %d," + " drop the overlapped section (%d-%d)" + " and append new data (%d-%d).", offset, (offset + count - 1), cachedOffset, offset, (cachedOffset - 1), cachedOffset, (offset + count - 1))); if (!pendingWrites.isEmpty()) { LOG.warn("There are other pending writes, fail this jumbo write"); return null; } LOG.warn("Modify this write to write only the appended data"); alterWriteRequest(request, cachedOffset); // Update local variable originalCount = count; offset = request.getOffset(); count = request.getCount(); } // Fail non-append call if (offset < cachedOffset) { LOG.warn("(offset,count,nextOffset): " + "(" + offset + "," + count + "," + nextOffset + ")"); return null; } else { DataState dataState = offset == cachedOffset ? WriteCtx.DataState.NO_DUMP : WriteCtx.DataState.ALLOW_DUMP; WriteCtx writeCtx = new WriteCtx(request.getHandle(), request.getOffset(), request.getCount(), originalCount, request.getStableHow(), request.getData(), channel, xid, false, dataState); if (LOG.isDebugEnabled()) { LOG.debug("Add new write to the list with nextOffset " + cachedOffset + " and requested offset=" + offset); } if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) { // update the memory size updateNonSequentialWriteInMemory(count); } // check if there is a WriteCtx with the same range in pendingWrites WriteCtx oldWriteCtx = checkRepeatedWriteRequest(request, channel, xid); if (oldWriteCtx == null) { pendingWrites.put(new OffsetRange(offset, offset + count), writeCtx); if (LOG.isDebugEnabled()) { LOG.debug("New write buffered with xid " + xid + " nextOffset " + cachedOffset + " req offset=" + offset + " mapsize=" + pendingWrites.size()); } } else { LOG.warn("Got a repeated request, same range, with xid: " + xid + " nextOffset " + +cachedOffset + " req offset=" + offset); } return writeCtx; } } /** Process an overwrite write request */ private void processOverWrite(DFSClient dfsClient, WRITE3Request request, Channel channel, int xid, IdMappingServiceProvider iug) { WccData wccData = new WccData(latestAttr.getWccAttr(), null); long offset = request.getOffset(); int count = request.getCount(); WriteStableHow stableHow = request.getStableHow(); WRITE3Response response; long cachedOffset = nextOffset.get(); if (offset + count > cachedOffset) { LOG.warn("Treat this jumbo write as a real random write, no support."); response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL, wccData, 0, WriteStableHow.UNSTABLE, Nfs3Constant.WRITE_COMMIT_VERF); } else { if (LOG.isDebugEnabled()) { LOG.debug("Process perfectOverWrite"); } // TODO: let executor handle perfect overwrite response = processPerfectOverWrite(dfsClient, offset, count, stableHow, 
request.getData().array(), Nfs3Utils.getFileIdPath(request.getHandle()), wccData, iug); } updateLastAccessTime(); Nfs3Utils.writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid); } /** * Check if we can start the write (back to HDFS) now. If there is no hole for * writing, and there is no other threads writing (i.e., asyncStatus is * false), start the writing and set asyncStatus to true. * * @return True if the new write is sequential and we can start writing * (including the case that there is already a thread writing). */ private synchronized boolean checkAndStartWrite( AsyncDataService asyncDataService, WriteCtx writeCtx) { if (writeCtx.getOffset() == nextOffset.get()) { if (!asyncStatus) { if (LOG.isDebugEnabled()) { LOG.debug("Trigger the write back task. Current nextOffset: " + nextOffset.get()); } asyncStatus = true; asyncWriteBackStartOffset = writeCtx.getOffset(); asyncDataService.execute(new AsyncDataService.WriteBackTask(this)); } else { if (LOG.isDebugEnabled()) { LOG.debug("The write back thread is working."); } } return true; } else { return false; } } private void receivedNewWriteInternal(DFSClient dfsClient, WRITE3Request request, Channel channel, int xid, AsyncDataService asyncDataService, IdMappingServiceProvider iug) { WriteStableHow stableHow = request.getStableHow(); WccAttr preOpAttr = latestAttr.getWccAttr(); int count = request.getCount(); WriteCtx writeCtx = addWritesToCache(request, channel, xid); if (writeCtx == null) { // offset < nextOffset processOverWrite(dfsClient, request, channel, xid, iug); } else { // The write is added to pendingWrites. // Check and start writing back if necessary boolean startWriting = checkAndStartWrite(asyncDataService, writeCtx); if (!startWriting) { // offset > nextOffset. check if we need to dump data waitForDump(); // In test, noticed some Linux client sends a batch (e.g., 1MB) // of reordered writes and won't send more writes until it gets // responses of the previous batch. So here send response immediately // for unstable non-sequential write if (stableHow != WriteStableHow.UNSTABLE) { LOG.info("Have to change stable write to unstable write: " + request.getStableHow()); stableHow = WriteStableHow.UNSTABLE; } if (LOG.isDebugEnabled()) { LOG.debug("UNSTABLE write request, send response for offset: " + writeCtx.getOffset()); } WccData fileWcc = new WccData(preOpAttr, latestAttr); WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); RpcProgramNfs3.metrics.addWrite(Nfs3Utils .getElapsedTime(writeCtx.startTime)); Nfs3Utils .writeChannel(channel, response.serialize(new XDR(), xid, new VerifierNone()), xid); writeCtx.setReplied(true); } } } /** * Honor 2 kinds of overwrites: 1). support some application like touch(write * the same content back to change mtime), 2) client somehow sends the same * write again in a different RPC. */ private WRITE3Response processPerfectOverWrite(DFSClient dfsClient, long offset, int count, WriteStableHow stableHow, byte[] data, String path, WccData wccData, IdMappingServiceProvider iug) { WRITE3Response response; // Read the content back byte[] readbuffer = new byte[count]; int readCount = 0; FSDataInputStream fis = null; try { // Sync file data and length to avoid partial read failure fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); } catch (ClosedChannelException closedException) { LOG.info("The FSDataOutputStream has been closed. 
" + "Continue processing the perfect overwrite."); } catch (IOException e) { LOG.info("hsync failed when processing possible perfect overwrite, path=" + path + " error: " + e); return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); } try { fis = dfsClient.createWrappedInputStream(dfsClient.open(path)); readCount = fis.read(offset, readbuffer, 0, count); if (readCount < count) { LOG.error("Can't read back " + count + " bytes, partial read size: " + readCount); return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); } } catch (IOException e) { LOG.info("Read failed when processing possible perfect overwrite, path=" + path, e); return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); } finally { IOUtils.cleanup(LOG, fis); } // Compare with the request Comparator comparator = new Comparator(); if (comparator.compare(readbuffer, 0, readCount, data, 0, count) != 0) { LOG.info("Perfect overwrite has different content"); response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); } else { LOG.info("Perfect overwrite has same content," + " updating the mtime, then return success"); Nfs3FileAttributes postOpAttr = null; try { dfsClient.setTimes(path, Time.monotonicNow(), -1); postOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug); } catch (IOException e) { LOG.info("Got error when processing perfect overwrite, path=" + path + " error: " + e); return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); } wccData.setPostOpAttr(postOpAttr); response = new WRITE3Response(Nfs3Status.NFS3_OK, wccData, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); } return response; } /** * Check the commit status with the given offset * @param commitOffset the offset to commit * @param channel the channel to return response * @param xid the xid of the commit request * @param preOpAttr the preOp attribute * @param fromRead whether the commit is triggered from read request * @return one commit status: COMMIT_FINISHED, COMMIT_WAIT, * COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR */ public COMMIT_STATUS checkCommit(DFSClient dfsClient, long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) { if (!fromRead) { Preconditions.checkState(channel != null && preOpAttr != null); // Keep stream active updateLastAccessTime(); } Preconditions.checkState(commitOffset >= 0); COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid, preOpAttr, fromRead); if (LOG.isDebugEnabled()) { LOG.debug("Got commit status: " + ret.name()); } // Do the sync outside the lock if (ret == COMMIT_STATUS.COMMIT_DO_SYNC || ret == COMMIT_STATUS.COMMIT_FINISHED) { try { // Sync file data and length fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); ret = COMMIT_STATUS.COMMIT_FINISHED; // Remove COMMIT_DO_SYNC status // Nothing to do for metadata since attr related change is pass-through } catch (ClosedChannelException cce) { if (pendingWrites.isEmpty()) { ret = COMMIT_STATUS.COMMIT_FINISHED; } else { ret = COMMIT_STATUS.COMMIT_ERROR; } } catch (IOException e) { LOG.error("Got stream error during data sync: " + e); // Do nothing. Stream will be closed eventually by StreamMonitor. 
// status = Nfs3Status.NFS3ERR_IO; ret = COMMIT_STATUS.COMMIT_ERROR; } } return ret; } // Check if the to-commit range is sequential @VisibleForTesting synchronized boolean checkSequential(final long commitOffset, final long nextOffset) { Preconditions.checkState(commitOffset >= nextOffset, "commitOffset " + commitOffset + " less than nextOffset " + nextOffset); long offset = nextOffset; Iterator<OffsetRange> it = pendingWrites.descendingKeySet().iterator(); while (it.hasNext()) { OffsetRange range = it.next(); if (range.getMin() != offset) { // got a hole return false; } offset = range.getMax(); if (offset > commitOffset) { return true; } } // there is gap between the last pending write and commitOffset return false; } private COMMIT_STATUS handleSpecialWait(boolean fromRead, long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr) { if (!fromRead) { // let client retry the same request, add pending commit to sync later CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid, preOpAttr); pendingCommits.put(commitOffset, commitCtx); } if (LOG.isDebugEnabled()) { LOG.debug("return COMMIT_SPECIAL_WAIT"); } return COMMIT_STATUS.COMMIT_SPECIAL_WAIT; } @VisibleForTesting synchronized COMMIT_STATUS checkCommitInternal(long commitOffset, Channel channel, int xid, Nfs3FileAttributes preOpAttr, boolean fromRead) { if (!activeState) { if (pendingWrites.isEmpty()) { return COMMIT_STATUS.COMMIT_INACTIVE_CTX; } else { // TODO: return success if already committed return COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE; } } long flushed = 0; try { flushed = getFlushedOffset(); } catch (IOException e) { LOG.error("Can't get flushed offset, error:" + e); return COMMIT_STATUS.COMMIT_ERROR; } if (LOG.isDebugEnabled()) { LOG.debug("getFlushedOffset=" + flushed + " commitOffset=" + commitOffset + "nextOffset=" + nextOffset.get()); } if (pendingWrites.isEmpty()) { if (aixCompatMode) { // Note that, there is no guarantee data is synced. Caller should still // do a sync here though the output stream might be closed. return COMMIT_STATUS.COMMIT_FINISHED; } else { if (flushed < nextOffset.get()) { if (LOG.isDebugEnabled()) { LOG.debug("get commit while still writing to the requested offset," + " with empty queue"); } return handleSpecialWait(fromRead, nextOffset.get(), channel, xid, preOpAttr); } else { return COMMIT_STATUS.COMMIT_FINISHED; } } } Preconditions.checkState(flushed <= nextOffset.get(), "flushed " + flushed + " is larger than nextOffset " + nextOffset.get()); // Handle large file upload if (uploadLargeFile && !aixCompatMode) { long co = (commitOffset > 0) ? commitOffset : pendingWrites.firstEntry() .getKey().getMax() - 1; if (co <= flushed) { return COMMIT_STATUS.COMMIT_DO_SYNC; } else if (co < nextOffset.get()) { if (LOG.isDebugEnabled()) { LOG.debug("get commit while still writing to the requested offset"); } return handleSpecialWait(fromRead, co, channel, xid, preOpAttr); } else { // co >= nextOffset if (checkSequential(co, nextOffset.get())) { return handleSpecialWait(fromRead, co, channel, xid, preOpAttr); } else { if (LOG.isDebugEnabled()) { LOG.debug("return COMMIT_SPECIAL_SUCCESS"); } return COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS; } } } if (commitOffset > 0) { if (aixCompatMode) { // The AIX NFS client misinterprets RFC-1813 and will always send 4096 // for the commitOffset even if fewer bytes than that have ever (or will // ever) be sent by the client. 
So, if in AIX compatibility mode, we // will always DO_SYNC if the number of bytes to commit have already all // been flushed, else we will fall through to the logic below which // checks for pending writes in the case that we're being asked to // commit more bytes than have so far been flushed. See HDFS-6549 for // more info. if (commitOffset <= flushed) { return COMMIT_STATUS.COMMIT_DO_SYNC; } } else { if (commitOffset > flushed) { if (!fromRead) { CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid, preOpAttr); pendingCommits.put(commitOffset, commitCtx); } return COMMIT_STATUS.COMMIT_WAIT; } else { return COMMIT_STATUS.COMMIT_DO_SYNC; } } } Entry<OffsetRange, WriteCtx> key = pendingWrites.firstEntry(); // Commit whole file, commitOffset == 0 if (!fromRead) { // Insert commit long maxOffset = key.getKey().getMax() - 1; Preconditions.checkState(maxOffset > 0); CommitCtx commitCtx = new CommitCtx(maxOffset, channel, xid, preOpAttr); pendingCommits.put(maxOffset, commitCtx); } return COMMIT_STATUS.COMMIT_WAIT; } /** * Check stream status to decide if it should be closed * @return true, remove stream; false, keep stream */ public synchronized boolean streamCleanup(long fileId, long streamTimeout) { Preconditions .checkState(streamTimeout >= NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT); if (!activeState) { return true; } boolean flag = false; // Check the stream timeout if (checkStreamTimeout(streamTimeout)) { if (LOG.isDebugEnabled()) { LOG.debug("stream can be closed for fileId: " + fileId); } flag = true; } return flag; } /** * Get (and remove) the next WriteCtx from {@link #pendingWrites} if possible. * * @return Null if {@link #pendingWrites} is null, or the next WriteCtx's * offset is larger than nextOffSet. */ private synchronized WriteCtx offerNextToWrite() { if (pendingWrites.isEmpty()) { if (LOG.isDebugEnabled()) { LOG.debug("The async write task has no pending writes, fileId: " + latestAttr.getFileId()); } // process pending commit again to handle this race: a commit is added // to pendingCommits map just after the last doSingleWrite returns. // There is no pending write and the commit should be handled by the // last doSingleWrite. Due to the race, the commit is left along and // can't be processed until cleanup. Therefore, we should do another // processCommits to fix the race issue. processCommits(nextOffset.get()); // nextOffset has same value as // flushedOffset this.asyncStatus = false; return null; } Entry<OffsetRange, WriteCtx> lastEntry = pendingWrites.lastEntry(); OffsetRange range = lastEntry.getKey(); WriteCtx toWrite = lastEntry.getValue(); if (LOG.isTraceEnabled()) { LOG.trace("range.getMin()=" + range.getMin() + " nextOffset=" + nextOffset); } long offset = nextOffset.get(); if (range.getMin() > offset) { if (LOG.isDebugEnabled()) { LOG.debug("The next sequential write has not arrived yet"); } processCommits(nextOffset.get()); // handle race this.asyncStatus = false; } else if (range.getMin() < offset && range.getMax() > offset) { // shouldn't happen since we do sync for overlapped concurrent writers LOG.warn("Got an overlapping write (" + range.getMin() + ", " + range.getMax() + "), nextOffset=" + offset + ". 
Silently drop it now"); pendingWrites.remove(range); processCommits(nextOffset.get()); // handle race } else { if (LOG.isDebugEnabled()) { LOG.debug("Remove write(" + range.getMin() + "-" + range.getMax() + ") from the list"); } // after writing, remove the WriteCtx from cache pendingWrites.remove(range); // update nextOffset nextOffset.addAndGet(toWrite.getCount()); if (LOG.isDebugEnabled()) { LOG.debug("Change nextOffset to " + nextOffset.get()); } return toWrite; } return null; } /** Invoked by AsyncDataService to write back to HDFS */ void executeWriteBack() { Preconditions.checkState(asyncStatus, "openFileCtx has false asyncStatus, fileId: " + latestAttr.getFileId()); final long startOffset = asyncWriteBackStartOffset; try { while (activeState) { // asyncStatus could be changed to false in offerNextToWrite() WriteCtx toWrite = offerNextToWrite(); if (toWrite != null) { // Do the write doSingleWrite(toWrite); updateLastAccessTime(); } else { break; } } if (!activeState && LOG.isDebugEnabled()) { LOG.debug("The openFileCtx is not active anymore, fileId: " + latestAttr.getFileId()); } } finally { // Make sure to reset asyncStatus to false unless a race happens synchronized (this) { if (startOffset == asyncWriteBackStartOffset) { asyncStatus = false; } else { LOG.info("Another async task is already started before this one" + " is finalized. fileId: " + latestAttr.getFileId() + " asyncStatus: " + asyncStatus + " original startOffset: " + startOffset + " new startOffset: " + asyncWriteBackStartOffset + ". Won't change asyncStatus here."); } } } } private void processCommits(long offset) { Preconditions.checkState(offset > 0); long flushedOffset = 0; Entry<Long, CommitCtx> entry = null; int status = Nfs3Status.NFS3ERR_IO; try { flushedOffset = getFlushedOffset(); entry = pendingCommits.firstEntry(); if (entry == null || entry.getValue().offset > flushedOffset) { return; } // Now do sync for the ready commits // Sync file data and length fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); status = Nfs3Status.NFS3_OK; } catch (ClosedChannelException cce) { if (!pendingWrites.isEmpty()) { LOG.error("Can't sync for fileId: " + latestAttr.getFileId() + ". Channel closed with writes pending.", cce); } status = Nfs3Status.NFS3ERR_IO; } catch (IOException e) { LOG.error("Got stream error during data sync: ", e); // Do nothing. Stream will be closed eventually by StreamMonitor. 
status = Nfs3Status.NFS3ERR_IO; } // Update latestAttr try { latestAttr = Nfs3Utils.getFileAttr(client, Nfs3Utils.getFileIdPath(latestAttr.getFileId()), iug); } catch (IOException e) { LOG.error("Can't get new file attr, fileId: " + latestAttr.getFileId(), e); status = Nfs3Status.NFS3ERR_IO; } if (latestAttr.getSize() != offset) { LOG.error("After sync, the expect file size: " + offset + ", however actual file size is: " + latestAttr.getSize()); status = Nfs3Status.NFS3ERR_IO; } WccData wccData = new WccData(Nfs3Utils.getWccAttr(latestAttr), latestAttr); // Send response for the ready commits while (entry != null && entry.getValue().offset <= flushedOffset) { pendingCommits.remove(entry.getKey()); CommitCtx commit = entry.getValue(); COMMIT3Response response = new COMMIT3Response(status, wccData, Nfs3Constant.WRITE_COMMIT_VERF); RpcProgramNfs3.metrics.addCommit(Nfs3Utils .getElapsedTime(commit.startTime)); Nfs3Utils.writeChannelCommit(commit.getChannel(), response .serialize(new XDR(), commit.getXid(), new VerifierNone()), commit.getXid()); if (LOG.isDebugEnabled()) { LOG.debug("FileId: " + latestAttr.getFileId() + " Service time: " + Nfs3Utils.getElapsedTime(commit.startTime) + "ns. Sent response for commit: " + commit); } entry = pendingCommits.firstEntry(); } } private void doSingleWrite(final WriteCtx writeCtx) { Channel channel = writeCtx.getChannel(); int xid = writeCtx.getXid(); long offset = writeCtx.getOffset(); int count = writeCtx.getCount(); WriteStableHow stableHow = writeCtx.getStableHow(); FileHandle handle = writeCtx.getHandle(); if (LOG.isDebugEnabled()) { LOG.debug("do write, fileId: " + handle.getFileId() + " offset: " + offset + " length: " + count + " stableHow: " + stableHow.name()); } try { // The write is not protected by lock. 
asyncState is used to make sure // there is one thread doing write back at any time writeCtx.writeData(fos); RpcProgramNfs3.metrics.incrBytesWritten(writeCtx.getCount()); long flushedOffset = getFlushedOffset(); if (flushedOffset != (offset + count)) { throw new IOException("output stream is out of sync, pos=" + flushedOffset + " and nextOffset should be" + (offset + count)); } // Reduce memory occupation size if request was allowed dumped if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) { synchronized (writeCtx) { if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) { writeCtx.setDataState(WriteCtx.DataState.NO_DUMP); updateNonSequentialWriteInMemory(-count); if (LOG.isDebugEnabled()) { LOG.debug("After writing " + handle.getFileId() + " at offset " + offset + ", updated the memory count, new value: " + nonSequentialWriteInMemory.get()); } } } } if (!writeCtx.getReplied()) { if (stableHow != WriteStableHow.UNSTABLE) { LOG.info("Do sync for stable write: " + writeCtx); try { if (stableHow == WriteStableHow.DATA_SYNC) { fos.hsync(); } else { Preconditions.checkState(stableHow == WriteStableHow.FILE_SYNC, "Unknown WriteStableHow: " + stableHow); // Sync file data and length fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); } } catch (IOException e) { LOG.error("hsync failed with writeCtx: " + writeCtx, e); throw e; } } WccAttr preOpAttr = latestAttr.getWccAttr(); WccData fileWcc = new WccData(preOpAttr, latestAttr); if (writeCtx.getOriginalCount() != WriteCtx.INVALID_ORIGINAL_COUNT) { LOG.warn("Return original count: " + writeCtx.getOriginalCount() + " instead of real data count: " + count); count = writeCtx.getOriginalCount(); } WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK, fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF); RpcProgramNfs3.metrics.addWrite(Nfs3Utils.getElapsedTime(writeCtx.startTime)); Nfs3Utils.writeChannel(channel, response.serialize( new XDR(), xid, new VerifierNone()), xid); } // Handle the waiting commits without holding any lock processCommits(writeCtx.getOffset() + writeCtx.getCount()); } catch (IOException e) { LOG.error("Error writing to fileId " + handle.getFileId() + " at offset " + offset + " and length " + count, e); if (!writeCtx.getReplied()) { WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO); Nfs3Utils.writeChannel(channel, response.serialize( new XDR(), xid, new VerifierNone()), xid); // Keep stream open. Either client retries or SteamMonitor closes it. 
} LOG.info("Clean up open file context for fileId: " + latestAttr.getFileId()); cleanup(); } } synchronized void cleanup() { if (!activeState) { LOG.info("Current OpenFileCtx is already inactive, no need to cleanup."); return; } activeState = false; // stop the dump thread if (dumpThread != null && dumpThread.isAlive()) { dumpThread.interrupt(); try { dumpThread.join(3000); } catch (InterruptedException ignored) { } } // Close stream try { if (fos != null) { fos.close(); } } catch (IOException e) { LOG.info("Can't close stream for fileId: " + latestAttr.getFileId() + ", error: " + e); } // Reply error for pending writes LOG.info("There are " + pendingWrites.size() + " pending writes."); WccAttr preOpAttr = latestAttr.getWccAttr(); while (!pendingWrites.isEmpty()) { OffsetRange key = pendingWrites.firstKey(); LOG.info("Fail pending write: (" + key.getMin() + ", " + key.getMax() + "), nextOffset=" + nextOffset.get()); WriteCtx writeCtx = pendingWrites.remove(key); if (!writeCtx.getReplied()) { WccData fileWcc = new WccData(preOpAttr, latestAttr); WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO, fileWcc, 0, writeCtx.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF); Nfs3Utils.writeChannel(writeCtx.getChannel(), response .serialize(new XDR(), writeCtx.getXid(), new VerifierNone()), writeCtx.getXid()); } } // Cleanup dump file if (dumpOut != null) { try { dumpOut.close(); } catch (IOException e) { LOG.error("Failed to close outputstream of dump file" + dumpFilePath, e); } File dumpFile = new File(dumpFilePath); if (dumpFile.exists() && !dumpFile.delete()) { LOG.error("Failed to delete dumpfile: " + dumpFile); } } if (raf != null) { try { raf.close(); } catch (IOException e) { LOG.error("Got exception when closing input stream of dump file.", e); } } } @VisibleForTesting ConcurrentNavigableMap<OffsetRange, WriteCtx> getPendingWritesForTest(){ return pendingWrites; } @VisibleForTesting ConcurrentNavigableMap<Long, CommitCtx> getPendingCommitsForTest(){ return pendingCommits; } @VisibleForTesting long getNextOffsetForTest() { return nextOffset.get(); } @VisibleForTesting void setNextOffsetForTest(long newValue) { nextOffset.set(newValue); } @VisibleForTesting void setActiveStatusForTest(boolean activeState) { this.activeState = activeState; } @Override public String toString() { return String.format("activeState: %b asyncStatus: %b nextOffset: %d", activeState, asyncStatus, nextOffset.get()); } }
49,514
35.623521
87
java
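A note on the OpenFileCtx.java source above: alterWriteRequest() trims a write that overlaps data already buffered, keeping only the portion past the cached offset. The following is a minimal, self-contained sketch of that arithmetic; the class name OverlapTrimSketch and the sample offsets are made up for illustration, and this is not part of the Hadoop sources shown above.

import java.nio.ByteBuffer;

/**
 * Minimal sketch of the overlap-trimming arithmetic used by
 * OpenFileCtx.alterWriteRequest(): keep only the bytes at or beyond the
 * cached offset. Hypothetical helper class, not part of Hadoop.
 */
public class OverlapTrimSketch {

  /**
   * @param offset       offset claimed by the incoming write
   * @param data         the write payload, positioned at 0
   * @param cachedOffset offset already covered by earlier writes
   * @return a buffer view containing only the appended tail
   */
  static ByteBuffer trimToAppendedTail(long offset, ByteBuffer data,
      long cachedOffset) {
    int count = data.remaining();
    if (offset >= cachedOffset || offset + count <= cachedOffset) {
      // Pure append (or a gap), or a write entirely behind the cached
      // offset: nothing to trim in this sketch.
      return data;
    }
    ByteBuffer tail = data.duplicate();
    // Skip the prefix that overlaps already-buffered data; the remaining
    // length is offset + count - cachedOffset, as in alterWriteRequest().
    tail.position((int) (cachedOffset - offset));
    return tail.slice();
  }

  public static void main(String[] args) {
    // A write claiming bytes 40..139 arrives while the stream is already
    // at offset 100; only the 40 appended bytes (100..139) are kept.
    ByteBuffer data = ByteBuffer.wrap(new byte[100]);
    ByteBuffer tail = trimToAppendedTail(40, data, 100);
    System.out.println("kept " + tail.remaining() + " bytes"); // kept 40 bytes
  }
}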
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.nfs.nfs3; import java.io.IOException; import java.net.DatagramSocket; import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.nfs.mount.Mountd; import org.apache.hadoop.nfs.nfs3.Nfs3Base; import org.apache.hadoop.util.StringUtils; import com.google.common.annotations.VisibleForTesting; /** * Nfs server. Supports NFS v3 using {@link RpcProgramNfs3}. * Currently Mountd program is also started inside this class. * Only TCP server is supported and UDP is not supported. */ public class Nfs3 extends Nfs3Base { private Mountd mountd; public Nfs3(NfsConfiguration conf) throws IOException { this(conf, null, true); } public Nfs3(NfsConfiguration conf, DatagramSocket registrationSocket, boolean allowInsecurePorts) throws IOException { super(RpcProgramNfs3.createRpcProgramNfs3(conf, registrationSocket, allowInsecurePorts), conf); mountd = new Mountd(conf, registrationSocket, allowInsecurePorts); } public Mountd getMountd() { return mountd; } @VisibleForTesting public void startServiceInternal(boolean register) throws IOException { mountd.start(register); // Start mountd start(register); } static void startService(String[] args, DatagramSocket registrationSocket) throws IOException { StringUtils.startupShutdownMessage(Nfs3.class, args, LOG); NfsConfiguration conf = new NfsConfiguration(); boolean allowInsecurePorts = conf.getBoolean( NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_KEY, NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_DEFAULT); final Nfs3 nfsServer = new Nfs3(conf, registrationSocket, allowInsecurePorts); nfsServer.startServiceInternal(true); } public static void main(String[] args) throws IOException { startService(args, null); } }
2,728
34.907895
75
java
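Nfs3.java above is a thin launcher that wires a Mountd together with the NFSv3 RPC program and starts both. The sketch below shows one way the gateway could be started from application code, using only the constructors and methods visible in that file; whether embedding the gateway like this fits a given deployment is an assumption, and EmbeddedNfs3Sketch is a made-up class name.

import java.io.IOException;

import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3;

/** Hypothetical embedding sketch based on the API shown in Nfs3.java. */
public class EmbeddedNfs3Sketch {
  public static void main(String[] args) throws IOException {
    NfsConfiguration conf = new NfsConfiguration();
    // Mirrors the port-monitoring check in Nfs3.startService().
    boolean allowInsecurePorts = conf.getBoolean(
        NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_KEY,
        NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_DEFAULT);
    // null registration socket, as in the default Nfs3(conf) constructor.
    Nfs3 nfsServer = new Nfs3(conf, null, allowInsecurePorts);
    nfsServer.startServiceInternal(true); // starts Mountd, then the NFS program
  }
}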
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/XAttr.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs; import java.util.Arrays; import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; /** * XAttr is the POSIX Extended Attribute model similar to that found in * traditional Operating Systems. Extended Attributes consist of one * or more name/value pairs associated with a file or directory. Five * namespaces are defined: user, trusted, security, system and raw. * 1) USER namespace attributes may be used by any user to store * arbitrary information. Access permissions in this namespace are * defined by a file directory's permission bits. For sticky directories, * only the owner and privileged user can write attributes. * <br> * 2) TRUSTED namespace attributes are only visible and accessible to * privileged users. This namespace is available from both user space * (filesystem API) and fs kernel. * <br> * 3) SYSTEM namespace attributes are used by the fs kernel to store * system objects. This namespace is only available in the fs * kernel. It is not visible to users. * <br> * 4) SECURITY namespace attributes are used by the fs kernel for * security features. It is not visible to users. * <br> * 5) RAW namespace attributes are used for internal system attributes that * sometimes need to be exposed. Like SYSTEM namespace attributes they are * not visible to the user except when getXAttr/getXAttrs is called on a file * or directory in the /.reserved/raw HDFS directory hierarchy. These * attributes can only be accessed by the superuser. 
* <p/> * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes"> * http://en.wikipedia.org/wiki/Extended_file_attributes</a> * */ @InterfaceAudience.Private public class XAttr { public static enum NameSpace { USER, TRUSTED, SECURITY, SYSTEM, RAW; } private final NameSpace ns; private final String name; private final byte[] value; public static class Builder { private NameSpace ns = NameSpace.USER; private String name; private byte[] value; public Builder setNameSpace(NameSpace ns) { this.ns = ns; return this; } public Builder setName(String name) { this.name = name; return this; } public Builder setValue(byte[] value) { this.value = value; return this; } public XAttr build() { return new XAttr(ns, name, value); } } private XAttr(NameSpace ns, String name, byte[] value) { this.ns = ns; this.name = name; this.value = value; } public NameSpace getNameSpace() { return ns; } public String getName() { return name; } public byte[] getValue() { return value; } @Override public int hashCode() { return new HashCodeBuilder(811, 67) .append(name) .append(ns) .append(value) .toHashCode(); } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (obj == this) { return true; } if (obj.getClass() != getClass()) { return false; } XAttr rhs = (XAttr) obj; return new EqualsBuilder() .append(ns, rhs.ns) .append(name, rhs.name) .append(value, rhs.value) .isEquals(); } /** * Similar to {@link #equals(Object)}, except ignores the XAttr value. * * @param obj to compare equality * @return if the XAttrs are equal, ignoring the XAttr value */ public boolean equalsIgnoreValue(Object obj) { if (obj == null) { return false; } if (obj == this) { return true; } if (obj.getClass() != getClass()) { return false; } XAttr rhs = (XAttr) obj; return new EqualsBuilder() .append(ns, rhs.ns) .append(name, rhs.name) .isEquals(); } @Override public String toString() { return "XAttr [ns=" + ns + ", name=" + name + ", value=" + Arrays.toString(value) + "]"; } }
4,842
28.530488
79
java
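XAttr above is constructed through its nested Builder and distinguishes equals(), which also compares the value bytes, from equalsIgnoreValue(). A small usage sketch follows; note the class is @InterfaceAudience.Private, so this is illustrative rather than a supported public API, and the attribute name "checksum" is made up.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.XAttr;

/** Illustrative sketch of the XAttr builder and equality methods above. */
public class XAttrBuilderSketch {
  public static void main(String[] args) {
    // The namespace is carried separately from the name; USER is also the
    // builder's default.
    XAttr withValue = new XAttr.Builder()
        .setNameSpace(XAttr.NameSpace.USER)
        .setName("checksum")
        .setValue("abc123".getBytes(StandardCharsets.UTF_8))
        .build();

    XAttr nameOnly = new XAttr.Builder()
        .setName("checksum")
        .build();

    // equals() also compares the value; equalsIgnoreValue() does not.
    System.out.println(withValue.equals(nameOnly));            // false
    System.out.println(withValue.equalsIgnoreValue(nameOnly)); // true
  }
}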
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/CacheFlag.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * Specifies semantics for CacheDirective operations. Multiple flags can
 * be combined in an EnumSet.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum CacheFlag {

  /**
   * Ignore cache pool resource limits when performing this operation.
   */
  FORCE((short) 0x01);

  private final short mode;

  private CacheFlag(short mode) {
    this.mode = mode;
  }

  short getMode() {
    return mode;
  }
}
1,388
29.866667
75
java
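The CacheFlag javadoc above notes that multiple flags are combined in an EnumSet before being passed to cache directive operations. A tiny illustrative sketch follows; the call sites that consume the EnumSet are not shown here, and CacheFlagSketch is a made-up name.

import java.util.EnumSet;

import org.apache.hadoop.fs.CacheFlag;

/** Sketch of combining CacheFlag values in an EnumSet, as described above. */
public class CacheFlagSketch {
  public static void main(String[] args) {
    // No flags: cache pool resource limits are respected.
    EnumSet<CacheFlag> defaults = EnumSet.noneOf(CacheFlag.class);
    // FORCE: ignore cache pool resource limits for this operation.
    EnumSet<CacheFlag> force = EnumSet.of(CacheFlag.FORCE);

    System.out.println(defaults.contains(CacheFlag.FORCE)); // false
    System.out.println(force.contains(CacheFlag.FORCE));    // true
  }
}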
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.Token; import java.net.URI; import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX; @InterfaceAudience.Private public class HAUtilClient { /** * @return true if the given nameNodeUri appears to be a logical URI. */ public static boolean isLogicalUri( Configuration conf, URI nameNodeUri) { String host = nameNodeUri.getHost(); // A logical name must be one of the service IDs. return DFSUtilClient.getNameServiceIds(conf).contains(host); } /** * Check whether the client has a failover proxy provider configured * for the namenode/nameservice. * * @param conf Configuration * @param nameNodeUri The URI of namenode * @return true if failover is configured. */ public static boolean isClientFailoverConfigured( Configuration conf, URI nameNodeUri) { String host = nameNodeUri.getHost(); String configKey = HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + host; return conf.get(configKey) != null; } /** * Get the service name used in the delegation token for the given logical * HA service. * @param uri the logical URI of the cluster * @param scheme the scheme of the corresponding FileSystem * @return the service name */ public static Text buildTokenServiceForLogicalUri(final URI uri, final String scheme) { return new Text(buildTokenServicePrefixForLogicalUri(scheme) + uri.getHost()); } public static String buildTokenServicePrefixForLogicalUri(String scheme) { return HA_DT_SERVICE_PREFIX + scheme + ":"; } /** * Parse the file system URI out of the provided token. */ public static URI getServiceUriFromToken(final String scheme, Token<?> token) { String tokStr = token.getService().toString(); final String prefix = buildTokenServicePrefixForLogicalUri( scheme); if (tokStr.startsWith(prefix)) { tokStr = tokStr.replaceFirst(prefix, ""); } return URI.create(scheme + "://" + tokStr); } /** * @return true if this token corresponds to a logical nameservice * rather than a specific namenode. */ public static boolean isTokenForLogicalUri(Token<?> token) { return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX); } }
3,362
34.03125
81
java
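HAUtilClient above derives delegation-token service names for logical (HA) URIs from a fixed prefix plus the scheme and the nameservice host. A brief sketch using only buildTokenServiceForLogicalUri(); the nameservice ID "mycluster" is made up, and the exact printed prefix depends on HdfsConstants.HA_DT_SERVICE_PREFIX.

import java.net.URI;

import org.apache.hadoop.hdfs.HAUtilClient;
import org.apache.hadoop.io.Text;

/** Sketch of the HA token service naming shown in HAUtilClient above. */
public class HaTokenServiceSketch {
  public static void main(String[] args) {
    // "mycluster" stands in for a configured nameservice ID.
    URI logicalUri = URI.create("hdfs://mycluster");
    Text service =
        HAUtilClient.buildTokenServiceForLogicalUri(logicalUri, "hdfs");
    // The service is HA_DT_SERVICE_PREFIX + "hdfs:" + "mycluster",
    // i.e. something like "ha-hdfs:mycluster".
    System.out.println(service);
  }
}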
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs; import com.google.common.base.Joiner; import com.google.common.collect.Maps; import com.google.common.primitives.SignedBytes; import org.apache.commons.io.Charsets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.UnsupportedEncodingException; import java.net.InetSocketAddress; import java.text.SimpleDateFormat; import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.List; import java.util.Locale; import java.util.Map; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES; public class DFSUtilClient { public static final byte[] EMPTY_BYTES = {}; private static final Logger LOG = LoggerFactory.getLogger( DFSUtilClient.class); /** * Converts a string to a byte array using UTF8 encoding. */ public static byte[] string2Bytes(String str) { return str.getBytes(Charsets.UTF_8); } /** * Converts a byte array to a string using UTF8 encoding. */ public static String bytes2String(byte[] bytes) { return bytes2String(bytes, 0, bytes.length); } /** Return used as percentage of capacity */ public static float getPercentUsed(long used, long capacity) { return capacity <= 0 ? 100 : (used * 100.0f)/capacity; } /** Return remaining as percentage of capacity */ public static float getPercentRemaining(long remaining, long capacity) { return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity; } /** Convert percentage to a string. */ public static String percent2String(double percentage) { return StringUtils.format("%.2f%%", percentage); } /** * Returns collection of nameservice Ids from the configuration. * @param conf configuration * @return collection of nameservice Ids, or null if not specified */ public static Collection<String> getNameServiceIds(Configuration conf) { return conf.getTrimmedStringCollection(DFS_NAMESERVICES); } /** * Namenode HighAvailability related configuration. * Returns collection of namenode Ids from the configuration. One logical id * for each namenode in the in the HA setup. 
* * @param conf configuration * @param nsId the nameservice ID to look at, or null for non-federated * @return collection of namenode Ids */ public static Collection<String> getNameNodeIds(Configuration conf, String nsId) { String key = addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId); return conf.getTrimmedStringCollection(key); } /** Add non empty and non null suffix to a key */ static String addSuffix(String key, String suffix) { if (suffix == null || suffix.isEmpty()) { return key; } assert !suffix.startsWith(".") : "suffix '" + suffix + "' should not already have '.' prepended."; return key + "." + suffix; } /** * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from * the configuration. * * @return list of InetSocketAddresses */ public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses( Configuration conf, String scheme) { if (WebHdfsConstants.WEBHDFS_SCHEME.equals(scheme)) { return getAddresses(conf, null, HdfsClientConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); } else if (WebHdfsConstants.SWEBHDFS_SCHEME.equals(scheme)) { return getAddresses(conf, null, HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY); } else { throw new IllegalArgumentException("Unsupported scheme: " + scheme); } } /** * Convert a LocatedBlocks to BlockLocations[] * @param blocks a LocatedBlocks * @return an array of BlockLocations */ public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) { if (blocks == null) { return new BlockLocation[0]; } return locatedBlocks2Locations(blocks.getLocatedBlocks()); } /** * Convert a List<LocatedBlock> to BlockLocation[] * @param blocks A List<LocatedBlock> to be converted * @return converted array of BlockLocation */ public static BlockLocation[] locatedBlocks2Locations( List<LocatedBlock> blocks) { if (blocks == null) { return new BlockLocation[0]; } int nrBlocks = blocks.size(); BlockLocation[] blkLocations = new BlockLocation[nrBlocks]; if (nrBlocks == 0) { return blkLocations; } int idx = 0; for (LocatedBlock blk : blocks) { assert idx < nrBlocks : "Incorrect index"; DatanodeInfo[] locations = blk.getLocations(); String[] hosts = new String[locations.length]; String[] xferAddrs = new String[locations.length]; String[] racks = new String[locations.length]; for (int hCnt = 0; hCnt < locations.length; hCnt++) { hosts[hCnt] = locations[hCnt].getHostName(); xferAddrs[hCnt] = locations[hCnt].getXferAddr(); NodeBase node = new NodeBase(xferAddrs[hCnt], locations[hCnt].getNetworkLocation()); racks[hCnt] = node.toString(); } DatanodeInfo[] cachedLocations = blk.getCachedLocations(); String[] cachedHosts = new String[cachedLocations.length]; for (int i=0; i<cachedLocations.length; i++) { cachedHosts[i] = cachedLocations[i].getHostName(); } blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts, racks, blk.getStartOffset(), blk.getBlockSize(), blk.isCorrupt()); idx++; } return blkLocations; } /** Compare two byte arrays by lexicographical order. 
*/ public static int compareBytes(byte[] left, byte[] right) { if (left == null) { left = EMPTY_BYTES; } if (right == null) { right = EMPTY_BYTES; } return SignedBytes.lexicographicalComparator().compare(left, right); } /** * Given a list of path components returns a byte array */ public static byte[] byteArray2bytes(byte[][] pathComponents) { if (pathComponents.length == 0) { return EMPTY_BYTES; } else if (pathComponents.length == 1 && (pathComponents[0] == null || pathComponents[0].length == 0)) { return new byte[]{(byte) Path.SEPARATOR_CHAR}; } int length = 0; for (int i = 0; i < pathComponents.length; i++) { length += pathComponents[i].length; if (i < pathComponents.length - 1) { length++; // for SEPARATOR } } byte[] path = new byte[length]; int index = 0; for (int i = 0; i < pathComponents.length; i++) { System.arraycopy(pathComponents[i], 0, path, index, pathComponents[i].length); index += pathComponents[i].length; if (i < pathComponents.length - 1) { path[index] = (byte) Path.SEPARATOR_CHAR; index++; } } return path; } /** * Decode a specific range of bytes of the given byte array to a string * using UTF8. * * @param bytes The bytes to be decoded into characters * @param offset The index of the first byte to decode * @param length The number of bytes to decode * @return The decoded string */ private static String bytes2String(byte[] bytes, int offset, int length) { try { return new String(bytes, offset, length, "UTF8"); } catch(UnsupportedEncodingException e) { assert false : "UTF8 encoding is not supported "; } return null; } /** * @return <code>coll</code> if it is non-null and non-empty. Otherwise, * returns a list with a single null value. */ static Collection<String> emptyAsSingletonNull(Collection<String> coll) { if (coll == null || coll.isEmpty()) { return Collections.singletonList(null); } else { return coll; } } /** Concatenate list of suffix strings '.' separated */ static String concatSuffixes(String... suffixes) { if (suffixes == null) { return null; } return Joiner.on(".").skipNulls().join(suffixes); } /** * Returns the configured address for all NameNodes in the cluster. * @param conf configuration * @param defaultAddress default address to return in case key is not found. * @param keys Set of keys to look for in the order of preference * @return a map(nameserviceId to map(namenodeId to InetSocketAddress)) */ static Map<String, Map<String, InetSocketAddress>> getAddresses(Configuration conf, String defaultAddress, String... keys) { Collection<String> nameserviceIds = getNameServiceIds(conf); return getAddressesForNsIds(conf, nameserviceIds, defaultAddress, keys); } /** * Returns the configured address for all NameNodes in the cluster. * @param conf configuration * @param defaultAddress default address to return in case key is not found. * @param keys Set of keys to look for in the order of preference * * @return a map(nameserviceId to map(namenodeId to InetSocketAddress)) */ static Map<String, Map<String, InetSocketAddress>> getAddressesForNsIds( Configuration conf, Collection<String> nsIds, String defaultAddress, String... keys) { // Look for configurations of the form <key>[.<nameserviceId>][.<namenodeId>] // across all of the configured nameservices and namenodes. 
Map<String, Map<String, InetSocketAddress>> ret = Maps.newLinkedHashMap(); for (String nsId : emptyAsSingletonNull(nsIds)) { Map<String, InetSocketAddress> isas = getAddressesForNameserviceId(conf, nsId, defaultAddress, keys); if (!isas.isEmpty()) { ret.put(nsId, isas); } } return ret; } static Map<String, InetSocketAddress> getAddressesForNameserviceId( Configuration conf, String nsId, String defaultValue, String... keys) { Collection<String> nnIds = getNameNodeIds(conf, nsId); Map<String, InetSocketAddress> ret = Maps.newHashMap(); for (String nnId : emptyAsSingletonNull(nnIds)) { String suffix = concatSuffixes(nsId, nnId); String address = getConfValue(defaultValue, suffix, conf, keys); if (address != null) { InetSocketAddress isa = NetUtils.createSocketAddr(address); if (isa.isUnresolved()) { LOG.warn("Namenode for {} remains unresolved for ID {}. Check your " + "hdfs-site.xml file to ensure namenodes are configured " + "properly.", nsId, nnId); } ret.put(nnId, isa); } } return ret; } /** * Given a list of keys in the order of preference, returns a value * for the key in the given order from the configuration. * @param defaultValue default value to return, when key was not found * @param keySuffix suffix to add to the key, if it is not null * @param conf Configuration * @param keys list of keys in the order of preference * @return value of the key or default if a key was not found in configuration */ private static String getConfValue(String defaultValue, String keySuffix, Configuration conf, String... keys) { String value = null; for (String key : keys) { key = addSuffix(key, keySuffix); value = conf.get(key); if (value != null) { break; } } if (value == null) { value = defaultValue; } return value; } /** * Whether the pathname is valid. Currently prohibits relative paths, * names which contain a ":" or "//", or other non-canonical paths. */ public static boolean isValidName(String src) { // Path must be absolute. if (!src.startsWith(Path.SEPARATOR)) { return false; } // Check for ".." "." ":" "/" String[] components = StringUtils.split(src, '/'); for (int i = 0; i < components.length; i++) { String element = components[i]; if (element.equals(".") || (element.contains(":")) || (element.contains("/"))) { return false; } // ".." is allowed in path starting with /.reserved/.inodes if (element.equals("..")) { if (components.length > 4 && components[1].equals(".reserved") && components[2].equals(".inodes")) { continue; } return false; } // The string may start or end with a /, but not have // "//" in the middle. if (element.isEmpty() && i != components.length - 1 && i != 0) { return false; } } return true; } /** * Converts a time duration in milliseconds into DDD:HH:MM:SS format. 
*/ public static String durationToString(long durationMs) { boolean negative = false; if (durationMs < 0) { negative = true; durationMs = -durationMs; } // Chop off the milliseconds long durationSec = durationMs / 1000; final int secondsPerMinute = 60; final int secondsPerHour = 60*60; final int secondsPerDay = 60*60*24; final long days = durationSec / secondsPerDay; durationSec -= days * secondsPerDay; final long hours = durationSec / secondsPerHour; durationSec -= hours * secondsPerHour; final long minutes = durationSec / secondsPerMinute; durationSec -= minutes * secondsPerMinute; final long seconds = durationSec; final long milliseconds = durationMs % 1000; String format = "%03d:%02d:%02d:%02d.%03d"; if (negative) { format = "-" + format; } return String.format(format, days, hours, minutes, seconds, milliseconds); } /** * Converts a Date into an ISO-8601 formatted datetime string. */ public static String dateToIso8601String(Date date) { SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH); return df.format(date); } }
15,224
34.324826
93
java
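The duration helper shown above turns a millisecond count into a DDD:HH:MM:SS.mmm string. Below is a minimal, dependency-free sketch of that same arithmetic; the class name and the sample inputs are invented for illustration and are not part of the file above.

// Standalone illustration of the DDD:HH:MM:SS.mmm conversion performed by
// the durationToString helper above (sample values are made up).
public class DurationFormatDemo {
  static String format(long durationMs) {
    boolean negative = durationMs < 0;
    if (negative) {
      durationMs = -durationMs;
    }
    long seconds = durationMs / 1000;
    long days = seconds / 86400;
    long hours = (seconds % 86400) / 3600;
    long minutes = (seconds % 3600) / 60;
    long secs = seconds % 60;
    long millis = durationMs % 1000;
    String s = String.format("%03d:%02d:%02d:%02d.%03d",
        days, hours, minutes, secs, millis);
    return negative ? "-" + s : s;
  }

  public static void main(String[] args) {
    // 1 day, 2 hours, 3 minutes, 4 seconds, 567 ms
    System.out.println(format(93_784_567L));  // 001:02:03:04.567
    System.out.println(format(-5_000L));      // -000:00:00:05.000
  }
}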
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.security.token.block; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.util.EnumSet; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @InterfaceAudience.Private public class BlockTokenIdentifier extends TokenIdentifier { static final Text KIND_NAME = new Text("HDFS_BLOCK_TOKEN"); public enum AccessMode { READ, WRITE, COPY, REPLACE } private long expiryDate; private int keyId; private String userId; private String blockPoolId; private long blockId; private final EnumSet<AccessMode> modes; private byte [] cache; public BlockTokenIdentifier() { this(null, null, 0, EnumSet.noneOf(AccessMode.class)); } public BlockTokenIdentifier(String userId, String bpid, long blockId, EnumSet<AccessMode> modes) { this.cache = null; this.userId = userId; this.blockPoolId = bpid; this.blockId = blockId; this.modes = modes == null ? EnumSet.noneOf(AccessMode.class) : modes; } @Override public Text getKind() { return KIND_NAME; } @Override public UserGroupInformation getUser() { if (userId == null || "".equals(userId)) { String user = blockPoolId + ":" + Long.toString(blockId); return UserGroupInformation.createRemoteUser(user); } return UserGroupInformation.createRemoteUser(userId); } public long getExpiryDate() { return expiryDate; } public void setExpiryDate(long expiryDate) { this.cache = null; this.expiryDate = expiryDate; } public int getKeyId() { return this.keyId; } public void setKeyId(int keyId) { this.cache = null; this.keyId = keyId; } public String getUserId() { return userId; } public String getBlockPoolId() { return blockPoolId; } public long getBlockId() { return blockId; } public EnumSet<AccessMode> getAccessModes() { return modes; } @Override public String toString() { return "block_token_identifier (expiryDate=" + this.getExpiryDate() + ", keyId=" + this.getKeyId() + ", userId=" + this.getUserId() + ", blockPoolId=" + this.getBlockPoolId() + ", blockId=" + this.getBlockId() + ", access modes=" + this.getAccessModes() + ")"; } static boolean isEqual(Object a, Object b) { return a == null ? 
b == null : a.equals(b); } @Override public boolean equals(Object obj) { if (obj == this) { return true; } if (obj instanceof BlockTokenIdentifier) { BlockTokenIdentifier that = (BlockTokenIdentifier) obj; return this.expiryDate == that.expiryDate && this.keyId == that.keyId && isEqual(this.userId, that.userId) && isEqual(this.blockPoolId, that.blockPoolId) && this.blockId == that.blockId && isEqual(this.modes, that.modes); } return false; } @Override public int hashCode() { return (int) expiryDate ^ keyId ^ (int) blockId ^ modes.hashCode() ^ (userId == null ? 0 : userId.hashCode()) ^ (blockPoolId == null ? 0 : blockPoolId.hashCode()); } @Override public void readFields(DataInput in) throws IOException { this.cache = null; expiryDate = WritableUtils.readVLong(in); keyId = WritableUtils.readVInt(in); userId = WritableUtils.readString(in); blockPoolId = WritableUtils.readString(in); blockId = WritableUtils.readVLong(in); int length = WritableUtils.readVIntInRange(in, 0, AccessMode.class.getEnumConstants().length); for (int i = 0; i < length; i++) { modes.add(WritableUtils.readEnum(in, AccessMode.class)); } } @Override public void write(DataOutput out) throws IOException { WritableUtils.writeVLong(out, expiryDate); WritableUtils.writeVInt(out, keyId); WritableUtils.writeString(out, userId); WritableUtils.writeString(out, blockPoolId); WritableUtils.writeVLong(out, blockId); WritableUtils.writeVInt(out, modes.size()); for (AccessMode aMode : modes) { WritableUtils.writeEnum(out, aMode); } } @Override public byte[] getBytes() { if(cache == null) cache = super.getBytes(); return cache; } @InterfaceAudience.Private public static class Renewer extends Token.TrivialRenewer { @Override protected Text getKind() { return KIND_NAME; } } }
5,405
27.452632
75
java
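BlockTokenIdentifier above follows the Writable contract through its write/readFields pair. The sketch below round-trips an identifier through a byte array to show that serialization preserves equality; the user name, block pool id and block id are made-up values, not anything from a real cluster.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode;

public class BlockTokenIdentifierRoundTrip {
  public static void main(String[] args) throws IOException {
    // Build an identifier for a hypothetical user, block pool and block.
    BlockTokenIdentifier id = new BlockTokenIdentifier(
        "alice", "BP-example-pool", 1073741825L,
        EnumSet.of(AccessMode.READ, AccessMode.WRITE));
    id.setKeyId(42);
    id.setExpiryDate(System.currentTimeMillis() + 600_000L);

    // Serialize with the Writable-style write() shown above.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    id.write(new DataOutputStream(bos));

    // Deserialize into a fresh identifier and compare.
    BlockTokenIdentifier copy = new BlockTokenIdentifier();
    copy.readFields(new DataInputStream(
        new ByteArrayInputStream(bos.toByteArray())));

    System.out.println(copy);             // block_token_identifier (...)
    System.out.println(id.equals(copy));  // true
  }
}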
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/DataEncryptionKey.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.security.token.block; import org.apache.hadoop.classification.InterfaceAudience; /** * A little struct class to contain all fields required to perform encryption of * the DataTransferProtocol. */ @InterfaceAudience.Private public class DataEncryptionKey { public final int keyId; public final String blockPoolId; public final byte[] nonce; public final byte[] encryptionKey; public final long expiryDate; public final String encryptionAlgorithm; public DataEncryptionKey(int keyId, String blockPoolId, byte[] nonce, byte[] encryptionKey, long expiryDate, String encryptionAlgorithm) { this.keyId = keyId; this.blockPoolId = blockPoolId; this.nonce = nonce; this.encryptionKey = encryptionKey; this.expiryDate = expiryDate; this.encryptionAlgorithm = encryptionAlgorithm; } @Override public String toString() { return keyId + "/" + blockPoolId + "/" + nonce.length + "/" + encryptionKey.length; } }
1,807
34.45098
80
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.security.token.delegation; import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; /** * A delegation token identifier that is specific to HDFS. */ @InterfaceAudience.Private public class DelegationTokenIdentifier extends AbstractDelegationTokenIdentifier { public static final Text HDFS_DELEGATION_KIND = new Text("HDFS_DELEGATION_TOKEN"); /** * Create an empty delegation token identifier for reading into. */ public DelegationTokenIdentifier() { } /** * Create a new delegation token identifier * @param owner the effective username of the token owner * @param renewer the username of the renewer * @param realUser the real username of the token owner */ public DelegationTokenIdentifier(Text owner, Text renewer, Text realUser) { super(owner, renewer, realUser); } @Override public Text getKind() { return HDFS_DELEGATION_KIND; } @Override public String toString() { return getKind() + " token " + getSequenceNumber() + " for " + getUser().getShortUserName(); } /** @return a string representation of the token */ public static String stringifyToken(final Token<?> token) throws IOException { DelegationTokenIdentifier ident = new DelegationTokenIdentifier(); ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier()); DataInputStream in = new DataInputStream(buf); ident.readFields(in); if (token.getService().getLength() > 0) { return ident + " on " + token.getService(); } else { return ident.toString(); } } public static class WebHdfsDelegationTokenIdentifier extends DelegationTokenIdentifier { public WebHdfsDelegationTokenIdentifier() { super(); } @Override public Text getKind() { return WebHdfsConstants.WEBHDFS_TOKEN_KIND; } } public static class SWebHdfsDelegationTokenIdentifier extends WebHdfsDelegationTokenIdentifier { public SWebHdfsDelegationTokenIdentifier() { super(); } @Override public Text getKind() { return WebHdfsConstants.SWEBHDFS_TOKEN_KIND; } } }
3,269
31.058824
85
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.security.token.delegation; import java.net.URI; import java.util.Collection; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector; /** * A delegation token that is specialized for HDFS */ @InterfaceAudience.Private public class DelegationTokenSelector extends AbstractDelegationTokenSelector<DelegationTokenIdentifier>{ public static final String SERVICE_NAME_KEY = "hdfs.service.host_"; /** * Select the delegation token for hdfs. The port will be rewritten to * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port. * This method should only be called by non-hdfs filesystems that do not * use the rpc port to acquire tokens. Ex. webhdfs, hftp * @param nnUri of the remote namenode * @param tokens as a collection * @param conf hadoop configuration * @return Token */ public Token<DelegationTokenIdentifier> selectToken( final URI nnUri, Collection<Token<?>> tokens, final Configuration conf) { // this guesses the remote cluster's rpc service port. // the current token design assumes it's the same as the local cluster's // rpc port unless a config key is set. there should be a way to automatic // and correctly determine the value Text serviceName = SecurityUtil.buildTokenService(nnUri); final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName); int nnRpcPort = HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT; if (nnServiceName != null) { nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort(); } // use original hostname from the uri to avoid unintentional host resolving serviceName = SecurityUtil.buildTokenService( NetUtils.createSocketAddrForHost(nnUri.getHost(), nnRpcPort)); return selectToken(serviceName, tokens); } public DelegationTokenSelector() { super(DelegationTokenIdentifier.HDFS_DELEGATION_KIND); } }
3,122
40.64
83
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/Event.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.inotify; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.FsPermission; import java.util.List; /** * Events sent by the inotify system. Note that no events are necessarily sent * when a file is opened for read (although a MetadataUpdateEvent will be sent * if the atime is updated). */ @InterfaceAudience.Public @InterfaceStability.Unstable public abstract class Event { public static enum EventType { CREATE, CLOSE, APPEND, RENAME, METADATA, UNLINK, TRUNCATE } private EventType eventType; public EventType getEventType() { return eventType; } public Event(EventType eventType) { this.eventType = eventType; } /** * Sent when a file is closed after append or create. */ @InterfaceAudience.Public public static class CloseEvent extends Event { private String path; private long fileSize; private long timestamp; public CloseEvent(String path, long fileSize, long timestamp) { super(EventType.CLOSE); this.path = path; this.fileSize = fileSize; this.timestamp = timestamp; } public String getPath() { return path; } /** * The size of the closed file in bytes. May be -1 if the size is not * available (e.g. in the case of a close generated by a concat operation). */ public long getFileSize() { return fileSize; } /** * The time when this event occurred, in milliseconds since the epoch. */ public long getTimestamp() { return timestamp; } @Override @InterfaceStability.Unstable public String toString() { return "CloseEvent [path=" + path + ", fileSize=" + fileSize + ", timestamp=" + timestamp + "]"; } } /** * Sent when a new file is created (including overwrite). 
*/ @InterfaceAudience.Public public static class CreateEvent extends Event { public static enum INodeType { FILE, DIRECTORY, SYMLINK; } private INodeType iNodeType; private String path; private long ctime; private int replication; private String ownerName; private String groupName; private FsPermission perms; private String symlinkTarget; private boolean overwrite; private long defaultBlockSize; public static class Builder { private INodeType iNodeType; private String path; private long ctime; private int replication; private String ownerName; private String groupName; private FsPermission perms; private String symlinkTarget; private boolean overwrite; private long defaultBlockSize = 0; public Builder iNodeType(INodeType type) { this.iNodeType = type; return this; } public Builder path(String path) { this.path = path; return this; } public Builder ctime(long ctime) { this.ctime = ctime; return this; } public Builder replication(int replication) { this.replication = replication; return this; } public Builder ownerName(String ownerName) { this.ownerName = ownerName; return this; } public Builder groupName(String groupName) { this.groupName = groupName; return this; } public Builder perms(FsPermission perms) { this.perms = perms; return this; } public Builder symlinkTarget(String symlinkTarget) { this.symlinkTarget = symlinkTarget; return this; } public Builder overwrite(boolean overwrite) { this.overwrite = overwrite; return this; } public Builder defaultBlockSize(long defaultBlockSize) { this.defaultBlockSize = defaultBlockSize; return this; } public CreateEvent build() { return new CreateEvent(this); } } private CreateEvent(Builder b) { super(EventType.CREATE); this.iNodeType = b.iNodeType; this.path = b.path; this.ctime = b.ctime; this.replication = b.replication; this.ownerName = b.ownerName; this.groupName = b.groupName; this.perms = b.perms; this.symlinkTarget = b.symlinkTarget; this.overwrite = b.overwrite; this.defaultBlockSize = b.defaultBlockSize; } public INodeType getiNodeType() { return iNodeType; } public String getPath() { return path; } /** * Creation time of the file, directory, or symlink. */ public long getCtime() { return ctime; } /** * Replication is zero if the CreateEvent iNodeType is directory or symlink. */ public int getReplication() { return replication; } public String getOwnerName() { return ownerName; } public String getGroupName() { return groupName; } public FsPermission getPerms() { return perms; } /** * Symlink target is null if the CreateEvent iNodeType is not symlink. */ public String getSymlinkTarget() { return symlinkTarget; } public boolean getOverwrite() { return overwrite; } public long getDefaultBlockSize() { return defaultBlockSize; } @Override @InterfaceStability.Unstable public String toString() { StringBuilder content = new StringBuilder(); content.append("CreateEvent [INodeType=" + iNodeType + ", path=" + path + ", ctime=" + ctime + ", replication=" + replication + ", ownerName=" + ownerName + ", groupName=" + groupName + ", perms=" + perms + ", "); if (symlinkTarget != null) { content.append("symlinkTarget=" + symlinkTarget + ", "); } content.append("overwrite=" + overwrite + ", defaultBlockSize=" + defaultBlockSize + "]"); return content.toString(); } } /** * Sent when there is an update to directory or file (none of the metadata * tracked here applies to symlinks) that is not associated with another * inotify event. The tracked metadata includes atime/mtime, replication, * owner/group, permissions, ACLs, and XAttributes. 
Fields not relevant to the * metadataType of the MetadataUpdateEvent will be null or will have their default * values. */ @InterfaceAudience.Public public static class MetadataUpdateEvent extends Event { public static enum MetadataType { TIMES, REPLICATION, OWNER, PERMS, ACLS, XATTRS; } private String path; private MetadataType metadataType; private long mtime; private long atime; private int replication; private String ownerName; private String groupName; private FsPermission perms; private List<AclEntry> acls; private List<XAttr> xAttrs; private boolean xAttrsRemoved; public static class Builder { private String path; private MetadataType metadataType; private long mtime; private long atime; private int replication; private String ownerName; private String groupName; private FsPermission perms; private List<AclEntry> acls; private List<XAttr> xAttrs; private boolean xAttrsRemoved; public Builder path(String path) { this.path = path; return this; } public Builder metadataType(MetadataType type) { this.metadataType = type; return this; } public Builder mtime(long mtime) { this.mtime = mtime; return this; } public Builder atime(long atime) { this.atime = atime; return this; } public Builder replication(int replication) { this.replication = replication; return this; } public Builder ownerName(String ownerName) { this.ownerName = ownerName; return this; } public Builder groupName(String groupName) { this.groupName = groupName; return this; } public Builder perms(FsPermission perms) { this.perms = perms; return this; } public Builder acls(List<AclEntry> acls) { this.acls = acls; return this; } public Builder xAttrs(List<XAttr> xAttrs) { this.xAttrs = xAttrs; return this; } public Builder xAttrsRemoved(boolean xAttrsRemoved) { this.xAttrsRemoved = xAttrsRemoved; return this; } public MetadataUpdateEvent build() { return new MetadataUpdateEvent(this); } } private MetadataUpdateEvent(Builder b) { super(EventType.METADATA); this.path = b.path; this.metadataType = b.metadataType; this.mtime = b.mtime; this.atime = b.atime; this.replication = b.replication; this.ownerName = b.ownerName; this.groupName = b.groupName; this.perms = b.perms; this.acls = b.acls; this.xAttrs = b.xAttrs; this.xAttrsRemoved = b.xAttrsRemoved; } public String getPath() { return path; } public MetadataType getMetadataType() { return metadataType; } public long getMtime() { return mtime; } public long getAtime() { return atime; } public int getReplication() { return replication; } public String getOwnerName() { return ownerName; } public String getGroupName() { return groupName; } public FsPermission getPerms() { return perms; } /** * The full set of ACLs currently associated with this file or directory. * May be null if all ACLs were removed. */ public List<AclEntry> getAcls() { return acls; } public List<XAttr> getxAttrs() { return xAttrs; } /** * Whether the xAttrs returned by getxAttrs() were removed (as opposed to * added). 
*/ public boolean isxAttrsRemoved() { return xAttrsRemoved; } @Override @InterfaceStability.Unstable public String toString() { StringBuilder content = new StringBuilder(); content.append("MetadataUpdateEvent [path=" + path + ", metadataType=" + metadataType); switch (metadataType) { case TIMES: content.append(", mtime=" + mtime + ", atime=" + atime); break; case REPLICATION: content.append(", replication=" + replication); break; case OWNER: content.append(", ownerName=" + ownerName + ", groupName=" + groupName); break; case PERMS: content.append(", perms=" + perms); break; case ACLS: content.append(", acls=" + acls); break; case XATTRS: content.append(", xAttrs=" + xAttrs + ", xAttrsRemoved=" + xAttrsRemoved); break; default: break; } content.append(']'); return content.toString(); } } /** * Sent when a file, directory, or symlink is renamed. */ @InterfaceAudience.Public public static class RenameEvent extends Event { private String srcPath; private String dstPath; private long timestamp; public static class Builder { private String srcPath; private String dstPath; private long timestamp; public Builder srcPath(String srcPath) { this.srcPath = srcPath; return this; } public Builder dstPath(String dstPath) { this.dstPath = dstPath; return this; } public Builder timestamp(long timestamp) { this.timestamp = timestamp; return this; } public RenameEvent build() { return new RenameEvent(this); } } private RenameEvent(Builder builder) { super(EventType.RENAME); this.srcPath = builder.srcPath; this.dstPath = builder.dstPath; this.timestamp = builder.timestamp; } public String getSrcPath() { return srcPath; } public String getDstPath() { return dstPath; } /** * The time when this event occurred, in milliseconds since the epoch. */ public long getTimestamp() { return timestamp; } @Override @InterfaceStability.Unstable public String toString() { return "RenameEvent [srcPath=" + srcPath + ", dstPath=" + dstPath + ", timestamp=" + timestamp + "]"; } } /** * Sent when an existing file is opened for append. */ @InterfaceAudience.Public public static class AppendEvent extends Event { private String path; private boolean newBlock; public static class Builder { private String path; private boolean newBlock; public Builder path(String path) { this.path = path; return this; } public Builder newBlock(boolean newBlock) { this.newBlock = newBlock; return this; } public AppendEvent build() { return new AppendEvent(this); } } private AppendEvent(Builder b) { super(EventType.APPEND); this.path = b.path; this.newBlock = b.newBlock; } public String getPath() { return path; } public boolean toNewBlock() { return newBlock; } @Override @InterfaceStability.Unstable public String toString() { return "AppendEvent [path=" + path + ", newBlock=" + newBlock + "]"; } } /** * Sent when a file, directory, or symlink is deleted. */ @InterfaceAudience.Public public static class UnlinkEvent extends Event { private String path; private long timestamp; public static class Builder { private String path; private long timestamp; public Builder path(String path) { this.path = path; return this; } public Builder timestamp(long timestamp) { this.timestamp = timestamp; return this; } public UnlinkEvent build() { return new UnlinkEvent(this); } } private UnlinkEvent(Builder builder) { super(EventType.UNLINK); this.path = builder.path; this.timestamp = builder.timestamp; } public String getPath() { return path; } /** * The time when this event occurred, in milliseconds since the epoch. 
*/ public long getTimestamp() { return timestamp; } @Override @InterfaceStability.Unstable public String toString() { return "UnlinkEvent [path=" + path + ", timestamp=" + timestamp + "]"; } } /** * Sent when a file is truncated. */ @InterfaceAudience.Public public static class TruncateEvent extends Event { private String path; private long fileSize; private long timestamp; public TruncateEvent(String path, long fileSize, long timestamp) { super(EventType.TRUNCATE); this.path = path; this.fileSize = fileSize; this.timestamp = timestamp; } public String getPath() { return path; } /** * The size of the truncated file in bytes. */ public long getFileSize() { return fileSize; } /** * The time when this event occurred, in milliseconds since the epoch. */ public long getTimestamp() { return timestamp; } @Override @InterfaceStability.Unstable public String toString() { return "TruncateEvent [path=" + path + ", fileSize=" + fileSize + ", timestamp=" + timestamp + "]"; } } }
16,484
23.386095
84
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatchList.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.inotify; import org.apache.hadoop.classification.InterfaceAudience; import java.util.List; /** * Contains a list of event batches, the transaction ID in the edit log up to * which we read to produce these events, and the first txid we observed when * producing these events (the last of which is for the purpose of determining * whether we have missed events due to edit deletion). Also contains the most * recent txid that the NameNode has sync'ed, so the client can determine how * far behind in the edit log it is. */ @InterfaceAudience.Private public class EventBatchList { private List<EventBatch> batches; private long firstTxid; private long lastTxid; private long syncTxid; public EventBatchList(List<EventBatch> batches, long firstTxid, long lastTxid, long syncTxid) { this.batches = batches; this.firstTxid = firstTxid; this.lastTxid = lastTxid; this.syncTxid = syncTxid; } public List<EventBatch> getBatches() { return batches; } public long getFirstTxid() { return firstTxid; } public long getLastTxid() { return lastTxid; } public long getSyncTxid() { return syncTxid; } }
2,025
30.65625
78
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/inotify/EventBatch.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.inotify; import org.apache.hadoop.classification.InterfaceAudience; /** * A batch of events that all happened on the same transaction ID. */ @InterfaceAudience.Public public class EventBatch { private final long txid; private final Event[] events; public EventBatch(long txid, Event[] events) { this.txid = txid; this.events = events; } public long getTxid() { return txid; } public Event[] getEvents() { return events; } }
1,296
29.880952
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.client; /** Client configuration properties */ public interface HdfsClientConfigKeys { long SECOND = 1000L; long MINUTE = 60 * SECOND; String DFS_BLOCK_SIZE_KEY = "dfs.blocksize"; long DFS_BLOCK_SIZE_DEFAULT = 128*1024*1024; String DFS_REPLICATION_KEY = "dfs.replication"; short DFS_REPLICATION_DEFAULT = 3; String DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern"; String DFS_WEBHDFS_USER_PATTERN_DEFAULT = "^[A-Za-z_][A-Za-z0-9._-]*[$]?$"; String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT = "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$"; static final String PREFIX = "dfs.client."; String DFS_NAMESERVICES = "dfs.nameservices"; int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070; String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address"; int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 50470; String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address"; String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes"; String DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled"; boolean DFS_WEBHDFS_ENABLED_DEFAULT = true; String DFS_NAMENODE_HTTP_PORT_KEY = "dfs.http.port"; String DFS_NAMENODE_HTTPS_PORT_KEY = "dfs.https.port"; int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020; String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY = "dfs.namenode.kerberos.principal"; /** dfs.client.retry configuration properties */ interface Retry { String PREFIX = HdfsClientConfigKeys.PREFIX + "retry."; String POLICY_ENABLED_KEY = PREFIX + "policy.enabled"; boolean POLICY_ENABLED_DEFAULT = false; String POLICY_SPEC_KEY = PREFIX + "policy.spec"; String POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,... 
String TIMES_GET_LAST_BLOCK_LENGTH_KEY = PREFIX + "times.get-last-block-length"; int TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT = 3; String INTERVAL_GET_LAST_BLOCK_LENGTH_KEY = PREFIX + "interval-ms.get-last-block-length"; int INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT = 4000; String MAX_ATTEMPTS_KEY = PREFIX + "max.attempts"; int MAX_ATTEMPTS_DEFAULT = 10; String WINDOW_BASE_KEY = PREFIX + "window.base"; int WINDOW_BASE_DEFAULT = 3000; } /** dfs.client.failover configuration properties */ interface Failover { String PREFIX = HdfsClientConfigKeys.PREFIX + "failover."; String PROXY_PROVIDER_KEY_PREFIX = PREFIX + "proxy.provider"; String MAX_ATTEMPTS_KEY = PREFIX + "max.attempts"; int MAX_ATTEMPTS_DEFAULT = 15; String SLEEPTIME_BASE_KEY = PREFIX + "sleep.base.millis"; int SLEEPTIME_BASE_DEFAULT = 500; String SLEEPTIME_MAX_KEY = PREFIX + "sleep.max.millis"; int SLEEPTIME_MAX_DEFAULT = 15000; String CONNECTION_RETRIES_KEY = PREFIX + "connection.retries"; int CONNECTION_RETRIES_DEFAULT = 0; String CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY = PREFIX + "connection.retries.on.timeouts"; int CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0; } /** dfs.client.write configuration properties */ interface Write { String PREFIX = HdfsClientConfigKeys.PREFIX + "write."; String MAX_PACKETS_IN_FLIGHT_KEY = PREFIX + "max-packets-in-flight"; int MAX_PACKETS_IN_FLIGHT_DEFAULT = 80; String EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY = PREFIX + "exclude.nodes.cache.expiry.interval.millis"; long EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT = 10*MINUTE; interface ByteArrayManager { String PREFIX = Write.PREFIX + "byte-array-manager."; String ENABLED_KEY = PREFIX + "enabled"; boolean ENABLED_DEFAULT = false; String COUNT_THRESHOLD_KEY = PREFIX + "count-threshold"; int COUNT_THRESHOLD_DEFAULT = 128; String COUNT_LIMIT_KEY = PREFIX + "count-limit"; int COUNT_LIMIT_DEFAULT = 2048; String COUNT_RESET_TIME_PERIOD_MS_KEY = PREFIX + "count-reset-time-period-ms"; long COUNT_RESET_TIME_PERIOD_MS_DEFAULT = 10*SECOND; } } /** dfs.client.block.write configuration properties */ interface BlockWrite { String PREFIX = HdfsClientConfigKeys.PREFIX + "block.write."; String RETRIES_KEY = PREFIX + "retries"; int RETRIES_DEFAULT = 3; String LOCATEFOLLOWINGBLOCK_RETRIES_KEY = PREFIX + "locateFollowingBlock.retries"; int LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT = 5; String LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY = PREFIX + "locateFollowingBlock.initial.delay.ms"; int LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_DEFAULT = 400; interface ReplaceDatanodeOnFailure { String PREFIX = BlockWrite.PREFIX + "replace-datanode-on-failure."; String ENABLE_KEY = PREFIX + "enable"; boolean ENABLE_DEFAULT = true; String POLICY_KEY = PREFIX + "policy"; String POLICY_DEFAULT = "DEFAULT"; String BEST_EFFORT_KEY = PREFIX + "best-effort"; boolean BEST_EFFORT_DEFAULT = false; } } /** dfs.client.read configuration properties */ interface Read { String PREFIX = HdfsClientConfigKeys.PREFIX + "read."; String PREFETCH_SIZE_KEY = PREFIX + "prefetch.size"; interface ShortCircuit { String PREFIX = Read.PREFIX + "shortcircuit."; String KEY = PREFIX.substring(0, PREFIX.length()-1); boolean DEFAULT = false; String SKIP_CHECKSUM_KEY = PREFIX + "skip.checksum"; boolean SKIP_CHECKSUM_DEFAULT = false; String BUFFER_SIZE_KEY = PREFIX + "buffer.size"; int BUFFER_SIZE_DEFAULT = 1024 * 1024; String STREAMS_CACHE_SIZE_KEY = PREFIX + "streams.cache.size"; int STREAMS_CACHE_SIZE_DEFAULT = 256; String STREAMS_CACHE_EXPIRY_MS_KEY = PREFIX + "streams.cache.expiry.ms"; long STREAMS_CACHE_EXPIRY_MS_DEFAULT = 
5*MINUTE; } } /** dfs.client.short.circuit configuration properties */ interface ShortCircuit { String PREFIX = Read.PREFIX + "short.circuit."; String REPLICA_STALE_THRESHOLD_MS_KEY = PREFIX + "replica.stale.threshold.ms"; long REPLICA_STALE_THRESHOLD_MS_DEFAULT = 30*MINUTE; } /** dfs.client.mmap configuration properties */ interface Mmap { String PREFIX = HdfsClientConfigKeys.PREFIX + "mmap."; String ENABLED_KEY = PREFIX + "enabled"; boolean ENABLED_DEFAULT = true; String CACHE_SIZE_KEY = PREFIX + "cache.size"; int CACHE_SIZE_DEFAULT = 256; String CACHE_TIMEOUT_MS_KEY = PREFIX + "cache.timeout.ms"; long CACHE_TIMEOUT_MS_DEFAULT = 60*MINUTE; String RETRY_TIMEOUT_MS_KEY = PREFIX + "retry.timeout.ms"; long RETRY_TIMEOUT_MS_DEFAULT = 5*MINUTE; } /** dfs.client.hedged.read configuration properties */ interface HedgedRead { String THRESHOLD_MILLIS_KEY = PREFIX + "threshold.millis"; long THRESHOLD_MILLIS_DEFAULT = 500; String THREADPOOL_SIZE_KEY = PREFIX + "threadpool.size"; int THREADPOOL_SIZE_DEFAULT = 0; } /** dfs.http.client configuration properties */ interface HttpClient { String PREFIX = "dfs.http.client."; // retry String RETRY_POLICY_ENABLED_KEY = PREFIX + "retry.policy.enabled"; boolean RETRY_POLICY_ENABLED_DEFAULT = false; String RETRY_POLICY_SPEC_KEY = PREFIX + "retry.policy.spec"; String RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,... String RETRY_MAX_ATTEMPTS_KEY = PREFIX + "retry.max.attempts"; int RETRY_MAX_ATTEMPTS_DEFAULT = 10; // failover String FAILOVER_MAX_ATTEMPTS_KEY = PREFIX + "failover.max.attempts"; int FAILOVER_MAX_ATTEMPTS_DEFAULT = 15; String FAILOVER_SLEEPTIME_BASE_KEY = PREFIX + "failover.sleep.base.millis"; int FAILOVER_SLEEPTIME_BASE_DEFAULT = 500; String FAILOVER_SLEEPTIME_MAX_KEY = PREFIX + "failover.sleep.max.millis"; int FAILOVER_SLEEPTIME_MAX_DEFAULT = 15000; } }
8,753
41.289855
163
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; import org.apache.hadoop.fs.XAttrCodec; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.FsPermissionExtension; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.StringUtils; import org.codehaus.jackson.map.ObjectMapper; import org.codehaus.jackson.map.ObjectReader; import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; class JsonUtilClient { static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {}; /** Convert a Json map to a RemoteException. */ static RemoteException toRemoteException(final Map<?, ?> json) { final Map<?, ?> m = (Map<?, ?>)json.get(RemoteException.class.getSimpleName()); final String message = (String)m.get("message"); final String javaClassName = (String)m.get("javaClassName"); return new RemoteException(javaClassName, message); } /** Convert a Json map to a Token. */ static Token<? extends TokenIdentifier> toToken( final Map<?, ?> m) throws IOException { if (m == null) { return null; } final Token<DelegationTokenIdentifier> token = new Token<>(); token.decodeFromUrlString((String)m.get("urlString")); return token; } /** Convert a Json map to a Token of BlockTokenIdentifier. */ @SuppressWarnings("unchecked") static Token<BlockTokenIdentifier> toBlockToken( final Map<?, ?> m) throws IOException { return (Token<BlockTokenIdentifier>)toToken(m); } /** Convert a string to a FsPermission object. 
*/ static FsPermission toFsPermission( final String s, Boolean aclBit, Boolean encBit) { FsPermission perm = new FsPermission(Short.parseShort(s, 8)); final boolean aBit = (aclBit != null) ? aclBit : false; final boolean eBit = (encBit != null) ? encBit : false; if (aBit || eBit) { return new FsPermissionExtension(perm, aBit, eBit); } else { return perm; } } /** Convert a Json map to a HdfsFileStatus object. */ static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) { if (json == null) { return null; } final Map<?, ?> m = includesType ? (Map<?, ?>)json.get(FileStatus.class.getSimpleName()) : json; final String localName = (String) m.get("pathSuffix"); final WebHdfsConstants.PathType type = WebHdfsConstants.PathType.valueOf((String) m.get("type")); final byte[] symlink = type != WebHdfsConstants.PathType.SYMLINK? null : DFSUtilClient.string2Bytes((String) m.get("symlink")); final long len = ((Number) m.get("length")).longValue(); final String owner = (String) m.get("owner"); final String group = (String) m.get("group"); final FsPermission permission = toFsPermission((String) m.get("permission"), (Boolean) m.get("aclBit"), (Boolean) m.get("encBit")); final long aTime = ((Number) m.get("accessTime")).longValue(); final long mTime = ((Number) m.get("modificationTime")).longValue(); final long blockSize = ((Number) m.get("blockSize")).longValue(); final short replication = ((Number) m.get("replication")).shortValue(); final long fileId = m.containsKey("fileId") ? ((Number) m.get("fileId")).longValue() : HdfsConstants.GRANDFATHER_INODE_ID; final int childrenNum = getInt(m, "childrenNum", -1); final byte storagePolicy = m.containsKey("storagePolicy") ? (byte) ((Number) m.get("storagePolicy")).longValue() : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY, replication, blockSize, mTime, aTime, permission, owner, group, symlink, DFSUtilClient.string2Bytes(localName), fileId, childrenNum, null, storagePolicy); } /** Convert a Json map to an ExtendedBlock object. */ static ExtendedBlock toExtendedBlock(final Map<?, ?> m) { if (m == null) { return null; } final String blockPoolId = (String)m.get("blockPoolId"); final long blockId = ((Number) m.get("blockId")).longValue(); final long numBytes = ((Number) m.get("numBytes")).longValue(); final long generationStamp = ((Number) m.get("generationStamp")).longValue(); return new ExtendedBlock(blockPoolId, blockId, numBytes, generationStamp); } static int getInt(Map<?, ?> m, String key, final int defaultValue) { Object value = m.get(key); if (value == null) { return defaultValue; } return ((Number) value).intValue(); } static long getLong(Map<?, ?> m, String key, final long defaultValue) { Object value = m.get(key); if (value == null) { return defaultValue; } return ((Number) value).longValue(); } static String getString( Map<?, ?> m, String key, final String defaultValue) { Object value = m.get(key); if (value == null) { return defaultValue; } return (String) value; } static List<?> getList(Map<?, ?> m, String key) { Object list = m.get(key); if (list instanceof List<?>) { return (List<?>) list; } else { return null; } } /** Convert a Json map to an DatanodeInfo object. */ static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) throws IOException { if (m == null) { return null; } // ipAddr and xferPort are the critical fields for accessing data. // If any one of the two is missing, an exception needs to be thrown. 
// Handle the case of old servers (1.x, 0.23.x) sending 'name' instead // of ipAddr and xferPort. String ipAddr = getString(m, "ipAddr", null); int xferPort = getInt(m, "xferPort", -1); if (ipAddr == null) { String name = getString(m, "name", null); if (name != null) { int colonIdx = name.indexOf(':'); if (colonIdx > 0) { ipAddr = name.substring(0, colonIdx); xferPort = Integer.parseInt(name.substring(colonIdx +1)); } else { throw new IOException( "Invalid value in server response: name=[" + name + "]"); } } else { throw new IOException( "Missing both 'ipAddr' and 'name' in server response."); } // ipAddr is non-null & non-empty string at this point. } // Check the validity of xferPort. if (xferPort == -1) { throw new IOException( "Invalid or missing 'xferPort' in server response."); } // TODO: Fix storageID return new DatanodeInfo( ipAddr, (String)m.get("hostName"), (String)m.get("storageID"), xferPort, ((Number) m.get("infoPort")).intValue(), getInt(m, "infoSecurePort", 0), ((Number) m.get("ipcPort")).intValue(), getLong(m, "capacity", 0l), getLong(m, "dfsUsed", 0l), getLong(m, "remaining", 0l), getLong(m, "blockPoolUsed", 0l), getLong(m, "cacheCapacity", 0l), getLong(m, "cacheUsed", 0l), getLong(m, "lastUpdate", 0l), getLong(m, "lastUpdateMonotonic", 0l), getInt(m, "xceiverCount", 0), getString(m, "networkLocation", ""), DatanodeInfo.AdminStates.valueOf(getString(m, "adminState", "NORMAL"))); } /** Convert an Object[] to a DatanodeInfo[]. */ static DatanodeInfo[] toDatanodeInfoArray(final List<?> objects) throws IOException { if (objects == null) { return null; } else if (objects.isEmpty()) { return EMPTY_DATANODE_INFO_ARRAY; } else { final DatanodeInfo[] array = new DatanodeInfo[objects.size()]; int i = 0; for (Object object : objects) { array[i++] = toDatanodeInfo((Map<?, ?>) object); } return array; } } /** Convert a Json map to LocatedBlock. */ static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException { if (m == null) { return null; } final ExtendedBlock b = toExtendedBlock((Map<?, ?>)m.get("block")); final DatanodeInfo[] locations = toDatanodeInfoArray( getList(m, "locations")); final long startOffset = ((Number) m.get("startOffset")).longValue(); final boolean isCorrupt = (Boolean)m.get("isCorrupt"); final DatanodeInfo[] cachedLocations = toDatanodeInfoArray( getList(m, "cachedLocations")); final LocatedBlock locatedblock = new LocatedBlock(b, locations, null, null, startOffset, isCorrupt, cachedLocations); locatedblock.setBlockToken(toBlockToken((Map<?, ?>)m.get("blockToken"))); return locatedblock; } /** Convert an List of Object to a List of LocatedBlock. */ static List<LocatedBlock> toLocatedBlockList( final List<?> objects) throws IOException { if (objects == null) { return null; } else if (objects.isEmpty()) { return Collections.emptyList(); } else { final List<LocatedBlock> list = new ArrayList<>(objects.size()); for (Object object : objects) { list.add(toLocatedBlock((Map<?, ?>) object)); } return list; } } /** Convert a Json map to a ContentSummary. 
*/ static ContentSummary toContentSummary(final Map<?, ?> json) { if (json == null) { return null; } final Map<?, ?> m = (Map<?, ?>)json.get(ContentSummary.class.getSimpleName()); final long length = ((Number) m.get("length")).longValue(); final long fileCount = ((Number) m.get("fileCount")).longValue(); final long directoryCount = ((Number) m.get("directoryCount")).longValue(); final long quota = ((Number) m.get("quota")).longValue(); final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue(); final long spaceQuota = ((Number) m.get("spaceQuota")).longValue(); return new ContentSummary.Builder().length(length).fileCount(fileCount). directoryCount(directoryCount).quota(quota).spaceConsumed(spaceConsumed). spaceQuota(spaceQuota).build(); } /** Convert a Json map to a MD5MD5CRC32FileChecksum. */ static MD5MD5CRC32FileChecksum toMD5MD5CRC32FileChecksum( final Map<?, ?> json) throws IOException { if (json == null) { return null; } final Map<?, ?> m = (Map<?, ?>)json.get(FileChecksum.class.getSimpleName()); final String algorithm = (String)m.get("algorithm"); final int length = ((Number) m.get("length")).intValue(); final byte[] bytes = StringUtils.hexStringToByte((String) m.get("bytes")); final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes)); final DataChecksum.Type crcType = MD5MD5CRC32FileChecksum.getCrcTypeFromAlgorithmName(algorithm); final MD5MD5CRC32FileChecksum checksum; // Recreate what DFSClient would have returned. switch(crcType) { case CRC32: checksum = new MD5MD5CRC32GzipFileChecksum(); break; case CRC32C: checksum = new MD5MD5CRC32CastagnoliFileChecksum(); break; default: throw new IOException("Unknown algorithm: " + algorithm); } checksum.readFields(in); //check algorithm name if (!checksum.getAlgorithmName().equals(algorithm)) { throw new IOException("Algorithm not matched. Expected " + algorithm + ", Received " + checksum.getAlgorithmName()); } //check length if (length != checksum.getLength()) { throw new IOException("Length not matched: length=" + length + ", checksum.getLength()=" + checksum.getLength()); } return checksum; } /** Convert a Json map to a AclStatus object. */ static AclStatus toAclStatus(final Map<?, ?> json) { if (json == null) { return null; } final Map<?, ?> m = (Map<?, ?>) json.get(AclStatus.class.getSimpleName()); AclStatus.Builder aclStatusBuilder = new AclStatus.Builder(); aclStatusBuilder.owner((String) m.get("owner")); aclStatusBuilder.group((String) m.get("group")); aclStatusBuilder.stickyBit((Boolean) m.get("stickyBit")); String permString = (String) m.get("permission"); if (permString != null) { final FsPermission permission = toFsPermission(permString, (Boolean) m.get("aclBit"), (Boolean) m.get("encBit")); aclStatusBuilder.setPermission(permission); } final List<?> entries = (List<?>) m.get("entries"); List<AclEntry> aclEntryList = new ArrayList<>(); for (Object entry : entries) { AclEntry aclEntry = AclEntry.parseAclEntry((String) entry, true); aclEntryList.add(aclEntry); } aclStatusBuilder.addEntries(aclEntryList); return aclStatusBuilder.build(); } static String getPath(final Map<?, ?> json) throws IOException { if (json == null) { return null; } String path = (String) json.get("Path"); return path; } static byte[] getXAttr(final Map<?, ?> json, final String name) throws IOException { if (json == null) { return null; } Map<String, byte[]> xAttrs = toXAttrs(json); if (xAttrs != null) { return xAttrs.get(name); } return null; } /** Expecting only single XAttr in the map. 
return its value */ static byte[] getXAttr(final Map<?, ?> json) throws IOException { if (json == null) { return null; } Map<String, byte[]> xAttrs = toXAttrs(json); if (xAttrs != null && !xAttrs.values().isEmpty()) { return xAttrs.values().iterator().next(); } return null; } static Map<String, byte[]> toXAttrs(final Map<?, ?> json) throws IOException { if (json == null) { return null; } return toXAttrMap(getList(json, "XAttrs")); } static List<String> toXAttrNames(final Map<?, ?> json) throws IOException { if (json == null) { return null; } final String namesInJson = (String) json.get("XAttrNames"); ObjectReader reader = new ObjectMapper().reader(List.class); final List<Object> xattrs = reader.readValue(namesInJson); final List<String> names = Lists.newArrayListWithCapacity(json.keySet().size()); for (Object xattr : xattrs) { names.add((String) xattr); } return names; } static Map<String, byte[]> toXAttrMap(final List<?> objects) throws IOException { if (objects == null) { return null; } else if (objects.isEmpty()) { return Maps.newHashMap(); } else { final Map<String, byte[]> xAttrs = Maps.newHashMap(); for (Object object : objects) { Map<?, ?> m = (Map<?, ?>) object; String name = (String) m.get("name"); String value = (String) m.get("value"); xAttrs.put(name, decodeXAttrValue(value)); } return xAttrs; } } static byte[] decodeXAttrValue(String value) throws IOException { if (value != null) { return XAttrCodec.decodeValue(value); } else { return new byte[0]; } } /** Convert a Json map to a Token of DelegationTokenIdentifier. */ @SuppressWarnings("unchecked") static Token<DelegationTokenIdentifier> toDelegationToken( final Map<?, ?> json) throws IOException { final Map<?, ?> m = (Map<?, ?>)json.get(Token.class.getSimpleName()); return (Token<DelegationTokenIdentifier>) toToken(m); } /** Convert a Json map to LocatedBlock. */ static LocatedBlocks toLocatedBlocks( final Map<?, ?> json) throws IOException { if (json == null) { return null; } final Map<?, ?> m = (Map<?, ?>)json.get(LocatedBlocks.class.getSimpleName()); final long fileLength = ((Number) m.get("fileLength")).longValue(); final boolean isUnderConstruction = (Boolean)m.get("isUnderConstruction"); final List<LocatedBlock> locatedBlocks = toLocatedBlockList( getList(m, "locatedBlocks")); final LocatedBlock lastLocatedBlock = toLocatedBlock( (Map<?, ?>) m.get("lastLocatedBlock")); final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete"); return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks, lastLocatedBlock, isLastBlockComplete, null); } }
18,193
34.67451
101
java
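The JsonUtilClient.toContentSummary code above reads every numeric field of the GETCONTENTSUMMARY response through Number before narrowing to long, because a JSON parser may return either Integer or Long. A minimal, JDK-only sketch of that extraction is below; the class name and the sample values in the map are hypothetical, standing in for a real parsed response.

import java.util.HashMap;
import java.util.Map;

public class ContentSummaryParseSketch {
  public static void main(String[] args) {
    // Hypothetical GETCONTENTSUMMARY body, already parsed into a Map the way
    // jsonParse() would produce it.
    Map<String, Object> m = new HashMap<>();
    m.put("length", 24930L);
    m.put("fileCount", 2);
    m.put("directoryCount", 1);
    m.put("quota", -1);
    m.put("spaceConsumed", 24930L);
    m.put("spaceQuota", -1);

    // Read through Number before narrowing, as toContentSummary does, so both
    // Integer and Long values are accepted.
    long length = ((Number) m.get("length")).longValue();
    long fileCount = ((Number) m.get("fileCount")).longValue();
    long quota = ((Number) m.get("quota")).longValue();
    System.out.println("length=" + length + " fileCount=" + fileCount + " quota=" + quota);
  }
}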
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.io.Text; @InterfaceAudience.Private public class WebHdfsConstants { public static final String HFTP_SCHEME = "hftp"; public static final Text HFTP_TOKEN_KIND = new Text("HFTP delegation"); public static final Text HSFTP_TOKEN_KIND = new Text("HSFTP delegation"); public static final String HSFTP_SCHEME = "hsftp"; public static final String WEBHDFS_SCHEME = "webhdfs"; public static final String SWEBHDFS_SCHEME = "swebhdfs"; public static final Text WEBHDFS_TOKEN_KIND = new Text("WEBHDFS delegation"); public static final Text SWEBHDFS_TOKEN_KIND = new Text("SWEBHDFS delegation"); enum PathType { FILE, DIRECTORY, SYMLINK; static PathType valueOf(HdfsFileStatus status) { return status.isDir()? DIRECTORY: status.isSymlink()? SYMLINK: FILE; } } }
1,774
40.27907
81
java
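WebHdfsConstants.PathType.valueOf above classifies an HdfsFileStatus with a two-step ternary: directory first, then symlink, otherwise file. The small standalone sketch below mirrors only that decision order; the class name is hypothetical and plain booleans stand in for HdfsFileStatus.isDir() and isSymlink().

public class PathTypeSketch {
  enum PathType { FILE, DIRECTORY, SYMLINK }

  static PathType of(boolean isDir, boolean isSymlink) {
    // Same decision order as PathType.valueOf: directory wins over symlink,
    // everything else is a plain file.
    return isDir ? PathType.DIRECTORY : isSymlink ? PathType.SYMLINK : PathType.FILE;
  }

  public static void main(String[] args) {
    System.out.println(of(true, false));  // DIRECTORY
    System.out.println(of(false, true));  // SYMLINK
    System.out.println(of(false, false)); // FILE
  }
}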
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web; import java.io.IOException; import java.net.InetSocketAddress; import java.net.URI; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.DelegationTokenRenewer; import org.apache.hadoop.fs.DelegationTokenRenewer.Renewable; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.HAUtilClient; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenRenewer; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; /** * This class implements the aspects that relate to delegation tokens for all * HTTP-based file system. 
*/ final class TokenAspect<T extends FileSystem & Renewable> { @InterfaceAudience.Private public static class TokenManager extends TokenRenewer { @Override public void cancel(Token<?> token, Configuration conf) throws IOException { getInstance(token, conf).cancelDelegationToken(token); } @Override public boolean handleKind(Text kind) { return kind.equals(WebHdfsConstants.HFTP_TOKEN_KIND) || kind.equals(WebHdfsConstants.HSFTP_TOKEN_KIND) || kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND) || kind.equals(WebHdfsConstants.SWEBHDFS_TOKEN_KIND); } @Override public boolean isManaged(Token<?> token) throws IOException { return true; } @Override public long renew(Token<?> token, Configuration conf) throws IOException { return getInstance(token, conf).renewDelegationToken(token); } private TokenManagementDelegator getInstance(Token<?> token, Configuration conf) throws IOException { final URI uri; final String scheme = getSchemeByKind(token.getKind()); if (HAUtilClient.isTokenForLogicalUri(token)) { uri = HAUtilClient.getServiceUriFromToken(scheme, token); } else { final InetSocketAddress address = SecurityUtil.getTokenServiceAddr (token); uri = URI.create(scheme + "://" + NetUtils.getHostPortString(address)); } return (TokenManagementDelegator) FileSystem.get(uri, conf); } private static String getSchemeByKind(Text kind) { if (kind.equals(WebHdfsConstants.HFTP_TOKEN_KIND)) { return WebHdfsConstants.HFTP_SCHEME; } else if (kind.equals(WebHdfsConstants.HSFTP_TOKEN_KIND)) { return WebHdfsConstants.HSFTP_SCHEME; } else if (kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND)) { return WebHdfsConstants.WEBHDFS_SCHEME; } else if (kind.equals(WebHdfsConstants.SWEBHDFS_TOKEN_KIND)) { return WebHdfsConstants.SWEBHDFS_SCHEME; } else { throw new IllegalArgumentException("Unsupported scheme"); } } } private static class DTSelecorByKind extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> { public DTSelecorByKind(final Text kind) { super(kind); } } /** * Callbacks for token management */ interface TokenManagementDelegator { void cancelDelegationToken(final Token<?> token) throws IOException; long renewDelegationToken(final Token<?> token) throws IOException; } private DelegationTokenRenewer.RenewAction<?> action; private DelegationTokenRenewer dtRenewer = null; private final DTSelecorByKind dtSelector; private final T fs; private boolean hasInitedToken; private final Logger LOG; private final Text serviceName; TokenAspect(T fs, final Text serviceName, final Text kind) { this.LOG = LoggerFactory.getLogger(fs.getClass()); this.fs = fs; this.dtSelector = new DTSelecorByKind(kind); this.serviceName = serviceName; } synchronized void ensureTokenInitialized() throws IOException { // we haven't inited yet, or we used to have a token but it expired if (!hasInitedToken || (action != null && !action.isValid())) { //since we don't already have a token, go get one Token<?> token = fs.getDelegationToken(null); // security might be disabled if (token != null) { fs.setDelegationToken(token); addRenewAction(fs); if (LOG.isDebugEnabled()) { LOG.debug("Created new DT for {}", token.getService()); } } hasInitedToken = true; } } public synchronized void reset() { hasInitedToken = false; } synchronized void initDelegationToken(UserGroupInformation ugi) { Token<?> token = selectDelegationToken(ugi); if (token != null) { if (LOG.isDebugEnabled()) { LOG.debug("Found existing DT for {}", token.getService()); } fs.setDelegationToken(token); hasInitedToken = true; } } synchronized void removeRenewAction() throws IOException { 
if (dtRenewer != null) { dtRenewer.removeRenewAction(fs); } } @VisibleForTesting Token<DelegationTokenIdentifier> selectDelegationToken( UserGroupInformation ugi) { return dtSelector.selectToken(serviceName, ugi.getTokens()); } private synchronized void addRenewAction(final T webhdfs) { if (dtRenewer == null) { dtRenewer = DelegationTokenRenewer.getInstance(); } action = dtRenewer.addRenewAction(webhdfs); } }
6,505
33.978495
83
java
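TokenAspect.TokenManager.getSchemeByKind above maps a delegation-token kind back to the filesystem scheme used to rebuild a URI for renew/cancel, rejecting anything it does not recognize. Below is a JDK-only sketch of that lookup; the class name is hypothetical, and the kind and scheme strings are the ones declared in WebHdfsConstants.

import java.util.Map;

public class SchemeByKindSketch {
  // Kind strings from WebHdfsConstants paired with the scheme TokenAspect
  // uses to reconstruct the FileSystem URI for renew/cancel.
  private static final Map<String, String> KIND_TO_SCHEME = Map.of(
      "HFTP delegation", "hftp",
      "HSFTP delegation", "hsftp",
      "WEBHDFS delegation", "webhdfs",
      "SWEBHDFS delegation", "swebhdfs");

  static String schemeByKind(String kind) {
    String scheme = KIND_TO_SCHEME.get(kind);
    if (scheme == null) {
      throw new IllegalArgumentException("Unsupported scheme");
    }
    return scheme;
  }

  public static void main(String[] args) {
    // A renew or cancel of a SWEBHDFS token is routed back through a
    // swebhdfs:// FileSystem instance rather than the RPC client.
    System.out.println(schemeByKind("SWEBHDFS delegation")); // swebhdfs
  }
}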
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web; import java.io.EOFException; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.URL; import java.util.List; import java.util.Map; import java.util.StringTokenizer; import org.apache.commons.io.input.BoundedInputStream; import org.apache.hadoop.fs.FSInputStream; import org.apache.http.HttpStatus; import com.google.common.annotations.VisibleForTesting; import com.google.common.net.HttpHeaders; /** * To support HTTP byte streams, a new connection to an HTTP server needs to be * created each time. This class hides the complexity of those multiple * connections from the client. Whenever seek() is called, a new connection * is made on the successive read(). The normal input stream functions are * connected to the currently active input stream. */ public abstract class ByteRangeInputStream extends FSInputStream { /** * This class wraps a URL and provides method to open connection. * It can be overridden to change how a connection is opened. */ public static abstract class URLOpener { protected URL url; public URLOpener(URL u) { url = u; } public void setURL(URL u) { url = u; } public URL getURL() { return url; } /** Connect to server with a data offset. */ protected abstract HttpURLConnection connect(final long offset, final boolean resolved) throws IOException; } static class InputStreamAndFileLength { final Long length; final InputStream in; InputStreamAndFileLength(Long length, InputStream in) { this.length = length; this.in = in; } } enum StreamStatus { NORMAL, SEEK, CLOSED } protected InputStream in; protected final URLOpener originalURL; protected final URLOpener resolvedURL; protected long startPos = 0; protected long currentPos = 0; protected Long fileLength = null; StreamStatus status = StreamStatus.SEEK; /** * Create with the specified URLOpeners. Original url is used to open the * stream for the first time. Resolved url is used in subsequent requests. 
* @param o Original url * @param r Resolved url */ public ByteRangeInputStream(URLOpener o, URLOpener r) throws IOException { this.originalURL = o; this.resolvedURL = r; getInputStream(); } protected abstract URL getResolvedUrl(final HttpURLConnection connection ) throws IOException; @VisibleForTesting protected InputStream getInputStream() throws IOException { switch (status) { case NORMAL: break; case SEEK: if (in != null) { in.close(); } InputStreamAndFileLength fin = openInputStream(startPos); in = fin.in; fileLength = fin.length; status = StreamStatus.NORMAL; break; case CLOSED: throw new IOException("Stream closed"); } return in; } @VisibleForTesting protected InputStreamAndFileLength openInputStream(long startOffset) throws IOException { // Use the original url if no resolved url exists, eg. if // it's the first time a request is made. final boolean resolved = resolvedURL.getURL() != null; final URLOpener opener = resolved? resolvedURL: originalURL; final HttpURLConnection connection = opener.connect(startOffset, resolved); resolvedURL.setURL(getResolvedUrl(connection)); InputStream in = connection.getInputStream(); final Long length; final Map<String, List<String>> headers = connection.getHeaderFields(); if (isChunkedTransferEncoding(headers)) { // file length is not known length = null; } else { // for non-chunked transfer-encoding, get content-length long streamlength = getStreamLength(connection, headers); length = startOffset + streamlength; // Java has a bug with >2GB request streams. It won't bounds check // the reads so the transfer blocks until the server times out in = new BoundedInputStream(in, streamlength); } return new InputStreamAndFileLength(length, in); } private static long getStreamLength(HttpURLConnection connection, Map<String, List<String>> headers) throws IOException { String cl = connection.getHeaderField(HttpHeaders.CONTENT_LENGTH); if (cl == null) { // Try to get the content length by parsing the content range // because HftpFileSystem does not return the content length // if the content is partial. if (connection.getResponseCode() == HttpStatus.SC_PARTIAL_CONTENT) { cl = connection.getHeaderField(HttpHeaders.CONTENT_RANGE); return getLengthFromRange(cl); } else { throw new IOException(HttpHeaders.CONTENT_LENGTH + " is missing: " + headers); } } return Long.parseLong(cl); } private static long getLengthFromRange(String cl) throws IOException { try { String[] str = cl.substring(6).split("[-/]"); return Long.parseLong(str[1]) - Long.parseLong(str[0]) + 1; } catch (Exception e) { throw new IOException( "failed to get content length by parsing the content range: " + cl + " " + e.getMessage()); } } private static boolean isChunkedTransferEncoding( final Map<String, List<String>> headers) { return contains(headers, HttpHeaders.TRANSFER_ENCODING, "chunked") || contains(headers, HttpHeaders.TE, "chunked"); } /** Does the HTTP header map contain the given key, value pair? 
*/ private static boolean contains(final Map<String, List<String>> headers, final String key, final String value) { final List<String> values = headers.get(key); if (values != null) { for(String v : values) { for(final StringTokenizer t = new StringTokenizer(v, ","); t.hasMoreTokens(); ) { if (value.equalsIgnoreCase(t.nextToken())) { return true; } } } } return false; } private int update(final int n) throws IOException { if (n != -1) { currentPos += n; } else if (fileLength != null && currentPos < fileLength) { throw new IOException("Got EOF but currentPos = " + currentPos + " < filelength = " + fileLength); } return n; } @Override public int read() throws IOException { final int b = getInputStream().read(); update((b == -1) ? -1 : 1); return b; } @Override public int read(byte b[], int off, int len) throws IOException { return update(getInputStream().read(b, off, len)); } /** * Seek to the given offset from the start of the file. * The next read() will be from that location. Can't * seek past the end of the file. */ @Override public void seek(long pos) throws IOException { if (pos != currentPos) { startPos = pos; currentPos = pos; if (status != StreamStatus.CLOSED) { status = StreamStatus.SEEK; } } } @Override public int read(long position, byte[] buffer, int offset, int length) throws IOException { try (InputStream in = openInputStream(position).in) { return in.read(buffer, offset, length); } } @Override public void readFully(long position, byte[] buffer, int offset, int length) throws IOException { final InputStreamAndFileLength fin = openInputStream(position); if (fin.length != null && length + position > fin.length) { throw new EOFException("The length to read " + length + " exceeds the file length " + fin.length); } try { int nread = 0; while (nread < length) { int nbytes = fin.in.read(buffer, offset + nread, length - nread); if (nbytes < 0) { throw new EOFException("End of file reached before reading fully."); } nread += nbytes; } } finally { fin.in.close(); } } /** * Return the current offset from the start of the file */ @Override public long getPos() throws IOException { return currentPos; } /** * Seeks a different copy of the data. Returns true if * found a new source, false otherwise. */ @Override public boolean seekToNewSource(long targetPos) throws IOException { return false; } @Override public void close() throws IOException { if (in != null) { in.close(); in = null; } status = StreamStatus.CLOSED; } }
9,266
29.483553
79
java
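When the server answers 206 Partial Content without a Content-Length, ByteRangeInputStream.getLengthFromRange above recovers the stream length from the Content-Range header: strip the leading "bytes ", split on '-' and '/', and compute last byte minus first byte plus one. A minimal sketch of that arithmetic follows; the class name and the header value in main are hypothetical.

public class ContentRangeLengthSketch {
  static long lengthFromRange(String contentRange) {
    // Drop "bytes " and split on '-' and '/', mirroring getLengthFromRange.
    String[] parts = contentRange.substring(6).split("[-/]");
    return Long.parseLong(parts[1]) - Long.parseLong(parts[0]) + 1;
  }

  public static void main(String[] args) {
    // e.g. a seek to offset 1024 of an 8192-byte file (hypothetical values):
    System.out.println(lengthFromRange("bytes 1024-8191/8192")); // 7168
  }
}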
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
/** res * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web; import java.io.BufferedOutputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.InetSocketAddress; import java.net.MalformedURLException; import java.net.URI; import java.net.URL; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.StringTokenizer; import javax.ws.rs.core.MediaType; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.DelegationTokenRenewer; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.XAttrCodec; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.HAUtilClient; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.web.resources.*; import org.apache.hadoop.hdfs.web.resources.HttpOpParam.Op; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenSelector; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.StringUtils; import org.codehaus.jackson.map.ObjectMapper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import 
com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; /** A FileSystem for HDFS over the web. */ public class WebHdfsFileSystem extends FileSystem implements DelegationTokenRenewer.Renewable, TokenAspect.TokenManagementDelegator { public static final Logger LOG = LoggerFactory .getLogger(WebHdfsFileSystem.class); /** WebHdfs version. */ public static final int VERSION = 1; /** Http URI: http://namenode:port/{PATH_PREFIX}/path/to/file */ public static final String PATH_PREFIX = "/" + WebHdfsConstants.WEBHDFS_SCHEME + "/v" + VERSION; /** Default connection factory may be overridden in tests to use smaller timeout values */ protected URLConnectionFactory connectionFactory; @VisibleForTesting public static final String CANT_FALLBACK_TO_INSECURE_MSG = "The client is configured to only allow connecting to secure cluster"; private boolean canRefreshDelegationToken; private UserGroupInformation ugi; private URI uri; private Token<?> delegationToken; protected Text tokenServiceName; private RetryPolicy retryPolicy = null; private Path workingDir; private Path cachedHomeDirectory; private InetSocketAddress nnAddrs[]; private int currentNNAddrIndex; private boolean disallowFallbackToInsecureCluster; /** * Return the protocol scheme for the FileSystem. * <p/> * * @return <code>webhdfs</code> */ @Override public String getScheme() { return WebHdfsConstants.WEBHDFS_SCHEME; } /** * return the underlying transport protocol (http / https). */ protected String getTransportScheme() { return "http"; } protected Text getTokenKind() { return WebHdfsConstants.WEBHDFS_TOKEN_KIND; } @Override public synchronized void initialize(URI uri, Configuration conf ) throws IOException { super.initialize(uri, conf); setConf(conf); /** set user pattern based on configuration file */ UserParam.setUserPattern(conf.get( HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT)); connectionFactory = URLConnectionFactory .newDefaultURLConnectionFactory(conf); ugi = UserGroupInformation.getCurrentUser(); this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority()); this.nnAddrs = resolveNNAddr(); boolean isHA = HAUtilClient.isClientFailoverConfigured(conf, this.uri); boolean isLogicalUri = isHA && HAUtilClient.isLogicalUri(conf, this.uri); // In non-HA or non-logical URI case, the code needs to call // getCanonicalUri() in order to handle the case where no port is // specified in the URI this.tokenServiceName = isLogicalUri ? 
HAUtilClient.buildTokenServiceForLogicalUri(uri, getScheme()) : SecurityUtil.buildTokenService(getCanonicalUri()); if (!isHA) { this.retryPolicy = RetryUtils.getDefaultRetryPolicy( conf, HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_KEY, HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_DEFAULT, HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_KEY, HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_DEFAULT, HdfsConstants.SAFEMODE_EXCEPTION_CLASS_NAME); } else { int maxFailoverAttempts = conf.getInt( HdfsClientConfigKeys.HttpClient.FAILOVER_MAX_ATTEMPTS_KEY, HdfsClientConfigKeys.HttpClient.FAILOVER_MAX_ATTEMPTS_DEFAULT); int maxRetryAttempts = conf.getInt( HdfsClientConfigKeys.HttpClient.RETRY_MAX_ATTEMPTS_KEY, HdfsClientConfigKeys.HttpClient.RETRY_MAX_ATTEMPTS_DEFAULT); int failoverSleepBaseMillis = conf.getInt( HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_BASE_KEY, HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_BASE_DEFAULT); int failoverSleepMaxMillis = conf.getInt( HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_MAX_KEY, HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_MAX_DEFAULT); this.retryPolicy = RetryPolicies .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis, failoverSleepMaxMillis); } this.workingDir = makeQualified(new Path(getHomeDirectoryString(ugi))); this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled(); this.disallowFallbackToInsecureCluster = !conf.getBoolean( CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT); this.delegationToken = null; } @Override public URI getCanonicalUri() { return super.getCanonicalUri(); } /** Is WebHDFS enabled in conf? */ public static boolean isEnabled(final Configuration conf) { final boolean b = conf.getBoolean( HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT); return b; } TokenSelector<DelegationTokenIdentifier> tokenSelector = new AbstractDelegationTokenSelector<DelegationTokenIdentifier>(getTokenKind()){}; // the first getAuthParams() for a non-token op will either get the // internal token from the ugi or lazy fetch one protected synchronized Token<?> getDelegationToken() throws IOException { if (canRefreshDelegationToken && delegationToken == null) { Token<?> token = tokenSelector.selectToken( new Text(getCanonicalServiceName()), ugi.getTokens()); // ugi tokens are usually indicative of a task which can't // refetch tokens. 
even if ugi has credentials, don't attempt // to get another token to match hdfs/rpc behavior if (token != null) { if(LOG.isDebugEnabled()) { LOG.debug("Using UGI token: {}", token); } canRefreshDelegationToken = false; } else { token = getDelegationToken(null); if (token != null) { if(LOG.isDebugEnabled()) { LOG.debug("Fetched new token: {}", token); } } else { // security is disabled canRefreshDelegationToken = false; } } setDelegationToken(token); } return delegationToken; } @VisibleForTesting synchronized boolean replaceExpiredDelegationToken() throws IOException { boolean replaced = false; if (canRefreshDelegationToken) { Token<?> token = getDelegationToken(null); if(LOG.isDebugEnabled()) { LOG.debug("Replaced expired token: {}", token); } setDelegationToken(token); replaced = (token != null); } return replaced; } @Override @VisibleForTesting public int getDefaultPort() { return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT); } @Override public URI getUri() { return this.uri; } @Override protected URI canonicalizeUri(URI uri) { return NetUtils.getCanonicalUri(uri, getDefaultPort()); } /** @return the home directory */ @Deprecated public static String getHomeDirectoryString(final UserGroupInformation ugi) { return "/user/" + ugi.getShortUserName(); } @Override public Path getHomeDirectory() { if (cachedHomeDirectory == null) { final HttpOpParam.Op op = GetOpParam.Op.GETHOMEDIRECTORY; try { String pathFromDelegatedFS = new FsPathResponseRunner<String>(op, null, new UserParam(ugi)) { @Override String decodeResponse(Map<?, ?> json) throws IOException { return JsonUtilClient.getPath(json); } } .run(); cachedHomeDirectory = new Path(pathFromDelegatedFS).makeQualified( this.getUri(), null); } catch (IOException e) { LOG.error("Unable to get HomeDirectory from original File System", e); cachedHomeDirectory = new Path("/user/" + ugi.getShortUserName()) .makeQualified(this.getUri(), null); } } return cachedHomeDirectory; } @Override public synchronized Path getWorkingDirectory() { return workingDir; } @Override public synchronized void setWorkingDirectory(final Path dir) { Path absolutePath = makeAbsolute(dir); String result = absolutePath.toUri().getPath(); if (!DFSUtilClient.isValidName(result)) { throw new IllegalArgumentException("Invalid DFS directory name " + result); } workingDir = absolutePath; } private Path makeAbsolute(Path f) { return f.isAbsolute()? f: new Path(workingDir, f); } static Map<?, ?> jsonParse(final HttpURLConnection c, final boolean useErrorStream ) throws IOException { if (c.getContentLength() == 0) { return null; } final InputStream in = useErrorStream? c.getErrorStream(): c.getInputStream(); if (in == null) { throw new IOException("The " + (useErrorStream? 
"error": "input") + " stream is null."); } try { final String contentType = c.getContentType(); if (contentType != null) { final MediaType parsed = MediaType.valueOf(contentType); if (!MediaType.APPLICATION_JSON_TYPE.isCompatible(parsed)) { throw new IOException("Content-Type \"" + contentType + "\" is incompatible with \"" + MediaType.APPLICATION_JSON + "\" (parsed=\"" + parsed + "\")"); } } ObjectMapper mapper = new ObjectMapper(); return mapper.reader(Map.class).readValue(in); } finally { in.close(); } } private static Map<?, ?> validateResponse(final HttpOpParam.Op op, final HttpURLConnection conn, boolean unwrapException) throws IOException { final int code = conn.getResponseCode(); // server is demanding an authentication we don't support if (code == HttpURLConnection.HTTP_UNAUTHORIZED) { // match hdfs/rpc exception throw new AccessControlException(conn.getResponseMessage()); } if (code != op.getExpectedHttpResponseCode()) { final Map<?, ?> m; try { m = jsonParse(conn, true); } catch(Exception e) { throw new IOException("Unexpected HTTP response: code=" + code + " != " + op.getExpectedHttpResponseCode() + ", " + op.toQueryString() + ", message=" + conn.getResponseMessage(), e); } if (m == null) { throw new IOException("Unexpected HTTP response: code=" + code + " != " + op.getExpectedHttpResponseCode() + ", " + op.toQueryString() + ", message=" + conn.getResponseMessage()); } else if (m.get(RemoteException.class.getSimpleName()) == null) { return m; } IOException re = JsonUtilClient.toRemoteException(m); // extract UGI-related exceptions and unwrap InvalidToken // the NN mangles these exceptions but the DN does not and may need // to re-fetch a token if either report the token is expired if (re.getMessage() != null && re.getMessage().startsWith( SecurityUtil.FAILED_TO_GET_UGI_MSG_HEADER)) { String[] parts = re.getMessage().split(":\\s+", 3); re = new RemoteException(parts[1], parts[2]); re = ((RemoteException)re).unwrapRemoteException(InvalidToken.class); } throw unwrapException? toIOException(re): re; } return null; } /** * Covert an exception to an IOException. * * For a non-IOException, wrap it with IOException. * For a RemoteException, unwrap it. * For an IOException which is not a RemoteException, return it. */ private static IOException toIOException(Exception e) { if (!(e instanceof IOException)) { return new IOException(e); } final IOException ioe = (IOException)e; if (!(ioe instanceof RemoteException)) { return ioe; } return ((RemoteException)ioe).unwrapRemoteException(); } private synchronized InetSocketAddress getCurrentNNAddr() { return nnAddrs[currentNNAddrIndex]; } /** * Reset the appropriate state to gracefully fail over to another name node */ private synchronized void resetStateToFailOver() { currentNNAddrIndex = (currentNNAddrIndex + 1) % nnAddrs.length; } /** * Return a URL pointing to given path on the namenode. * * @param path to obtain the URL for * @param query string to append to the path * @return namenode URL referring to the given path * @throws IOException on error constructing the URL */ private URL getNamenodeURL(String path, String query) throws IOException { InetSocketAddress nnAddr = getCurrentNNAddr(); final URL url = new URL(getTransportScheme(), nnAddr.getHostName(), nnAddr.getPort(), path + '?' 
+ query); if (LOG.isTraceEnabled()) { LOG.trace("url={}", url); } return url; } Param<?,?>[] getAuthParameters(final HttpOpParam.Op op) throws IOException { List<Param<?,?>> authParams = Lists.newArrayList(); // Skip adding delegation token for token operations because these // operations require authentication. Token<?> token = null; if (!op.getRequireAuth()) { token = getDelegationToken(); } if (token != null) { authParams.add(new DelegationParam(token.encodeToUrlString())); } else { UserGroupInformation userUgi = ugi; UserGroupInformation realUgi = userUgi.getRealUser(); if (realUgi != null) { // proxy user authParams.add(new DoAsParam(userUgi.getShortUserName())); userUgi = realUgi; } authParams.add(new UserParam(userUgi.getShortUserName())); } return authParams.toArray(new Param<?,?>[0]); } URL toUrl(final HttpOpParam.Op op, final Path fspath, final Param<?,?>... parameters) throws IOException { //initialize URI path and query final String path = PATH_PREFIX + (fspath == null? "/": makeQualified(fspath).toUri().getRawPath()); final String query = op.toQueryString() + Param.toSortedString("&", getAuthParameters(op)) + Param.toSortedString("&", parameters); final URL url = getNamenodeURL(path, query); if (LOG.isTraceEnabled()) { LOG.trace("url={}", url); } return url; } /** * This class is for initialing a HTTP connection, connecting to server, * obtaining a response, and also handling retry on failures. */ abstract class AbstractRunner<T> { abstract protected URL getUrl() throws IOException; protected final HttpOpParam.Op op; private final boolean redirected; protected ExcludeDatanodesParam excludeDatanodes = new ExcludeDatanodesParam(""); private boolean checkRetry; protected AbstractRunner(final HttpOpParam.Op op, boolean redirected) { this.op = op; this.redirected = redirected; } T run() throws IOException { UserGroupInformation connectUgi = ugi.getRealUser(); if (connectUgi == null) { connectUgi = ugi; } if (op.getRequireAuth()) { connectUgi.checkTGTAndReloginFromKeytab(); } try { // the entire lifecycle of the connection must be run inside the // doAs to ensure authentication is performed correctly return connectUgi.doAs( new PrivilegedExceptionAction<T>() { @Override public T run() throws IOException { return runWithRetry(); } }); } catch (InterruptedException e) { throw new IOException(e); } } /** * Two-step requests redirected to a DN * * Create/Append: * Step 1) Submit a Http request with neither auto-redirect nor data. * Step 2) Submit another Http request with the URL from the Location header with data. * * The reason of having two-step create/append is for preventing clients to * send out the data before the redirect. This issue is addressed by the * "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3. * Unfortunately, there are software library bugs (e.g. Jetty 6 http server * and Java 6 http client), which do not correctly implement "Expect: * 100-continue". The two-step create/append is a temporary workaround for * the software library bugs. 
* * Open/Checksum * Also implements two-step connects for other operations redirected to * a DN such as open and checksum */ private HttpURLConnection connect(URL url) throws IOException { //redirect hostname and port String redirectHost = null; // resolve redirects for a DN operation unless already resolved if (op.getRedirect() && !redirected) { final HttpOpParam.Op redirectOp = HttpOpParam.TemporaryRedirectOp.valueOf(op); final HttpURLConnection conn = connect(redirectOp, url); // application level proxy like httpfs might not issue a redirect if (conn.getResponseCode() == op.getExpectedHttpResponseCode()) { return conn; } try { validateResponse(redirectOp, conn, false); url = new URL(conn.getHeaderField("Location")); redirectHost = url.getHost() + ":" + url.getPort(); } finally { conn.disconnect(); } } try { return connect(op, url); } catch (IOException ioe) { if (redirectHost != null) { if (excludeDatanodes.getValue() != null) { excludeDatanodes = new ExcludeDatanodesParam(redirectHost + "," + excludeDatanodes.getValue()); } else { excludeDatanodes = new ExcludeDatanodesParam(redirectHost); } } throw ioe; } } private HttpURLConnection connect(final HttpOpParam.Op op, final URL url) throws IOException { final HttpURLConnection conn = (HttpURLConnection)connectionFactory.openConnection(url); final boolean doOutput = op.getDoOutput(); conn.setRequestMethod(op.getType().toString()); conn.setInstanceFollowRedirects(false); switch (op.getType()) { // if not sending a message body for a POST or PUT operation, need // to ensure the server/proxy knows this case POST: case PUT: { conn.setDoOutput(true); if (!doOutput) { // explicitly setting content-length to 0 won't do spnego!! // opening and closing the stream will send "Content-Length: 0" conn.getOutputStream().close(); } else { conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM); conn.setChunkedStreamingMode(32 << 10); //32kB-chunk } break; } default: { conn.setDoOutput(doOutput); break; } } conn.connect(); return conn; } private T runWithRetry() throws IOException { /** * Do the real work. * * There are three cases that the code inside the loop can throw an * IOException: * * <ul> * <li>The connection has failed (e.g., ConnectException, * @see FailoverOnNetworkExceptionRetry for more details)</li> * <li>The namenode enters the standby state (i.e., StandbyException).</li> * <li>The server returns errors for the command (i.e., RemoteException)</li> * </ul> * * The call to shouldRetry() will conduct the retry policy. The policy * examines the exception and swallows it if it decides to rerun the work. */ for(int retry = 0; ; retry++) { checkRetry = !redirected; final URL url = getUrl(); try { final HttpURLConnection conn = connect(url); // output streams will validate on close if (!op.getDoOutput()) { validateResponse(op, conn, false); } return getResponse(conn); } catch (AccessControlException ace) { // no retries for auth failures throw ace; } catch (InvalidToken it) { // try to replace the expired token with a new one. the attempt // to acquire a new token must be outside this operation's retry // so if it fails after its own retries, this operation fails too. 
if (op.getRequireAuth() || !replaceExpiredDelegationToken()) { throw it; } } catch (IOException ioe) { shouldRetry(ioe, retry); } } } private void shouldRetry(final IOException ioe, final int retry ) throws IOException { InetSocketAddress nnAddr = getCurrentNNAddr(); if (checkRetry) { try { final RetryPolicy.RetryAction a = retryPolicy.shouldRetry( ioe, retry, 0, true); boolean isRetry = a.action == RetryPolicy.RetryAction.RetryDecision.RETRY; boolean isFailoverAndRetry = a.action == RetryPolicy.RetryAction.RetryDecision.FAILOVER_AND_RETRY; if (isRetry || isFailoverAndRetry) { LOG.info("Retrying connect to namenode: {}. Already tried {}" + " time(s); retry policy is {}, delay {}ms.", nnAddr, retry, retryPolicy, a.delayMillis); if (isFailoverAndRetry) { resetStateToFailOver(); } Thread.sleep(a.delayMillis); return; } } catch(Exception e) { LOG.warn("Original exception is ", ioe); throw toIOException(e); } } throw toIOException(ioe); } abstract T getResponse(HttpURLConnection conn) throws IOException; } /** * Abstract base class to handle path-based operations with params */ abstract class AbstractFsPathRunner<T> extends AbstractRunner<T> { private final Path fspath; private final Param<?,?>[] parameters; AbstractFsPathRunner(final HttpOpParam.Op op, final Path fspath, Param<?,?>... parameters) { super(op, false); this.fspath = fspath; this.parameters = parameters; } AbstractFsPathRunner(final HttpOpParam.Op op, Param<?,?>[] parameters, final Path fspath) { super(op, false); this.fspath = fspath; this.parameters = parameters; } @Override protected URL getUrl() throws IOException { if (excludeDatanodes.getValue() != null) { Param<?, ?>[] tmpParam = new Param<?, ?>[parameters.length + 1]; System.arraycopy(parameters, 0, tmpParam, 0, parameters.length); tmpParam[parameters.length] = excludeDatanodes; return toUrl(op, fspath, tmpParam); } else { return toUrl(op, fspath, parameters); } } } /** * Default path-based implementation expects no json response */ class FsPathRunner extends AbstractFsPathRunner<Void> { FsPathRunner(Op op, Path fspath, Param<?,?>... parameters) { super(op, fspath, parameters); } @Override Void getResponse(HttpURLConnection conn) throws IOException { return null; } } /** * Handle path-based operations with a json response */ abstract class FsPathResponseRunner<T> extends AbstractFsPathRunner<T> { FsPathResponseRunner(final HttpOpParam.Op op, final Path fspath, Param<?,?>... parameters) { super(op, fspath, parameters); } FsPathResponseRunner(final HttpOpParam.Op op, Param<?,?>[] parameters, final Path fspath) { super(op, parameters, fspath); } @Override final T getResponse(HttpURLConnection conn) throws IOException { try { final Map<?,?> json = jsonParse(conn, false); if (json == null) { // match exception class thrown by parser throw new IllegalStateException("Missing response"); } return decodeResponse(json); } catch (IOException ioe) { throw ioe; } catch (Exception e) { // catch json parser errors final IOException ioe = new IOException("Response decoding failure: "+e.toString(), e); if (LOG.isDebugEnabled()) { LOG.debug("Response decoding failure: {}", e.toString(), e); } throw ioe; } finally { conn.disconnect(); } } abstract T decodeResponse(Map<?,?> json) throws IOException; } /** * Handle path-based operations with json boolean response */ class FsPathBooleanRunner extends FsPathResponseRunner<Boolean> { FsPathBooleanRunner(Op op, Path fspath, Param<?,?>... 
parameters) { super(op, fspath, parameters); } @Override Boolean decodeResponse(Map<?,?> json) throws IOException { return (Boolean)json.get("boolean"); } } /** * Handle create/append output streams */ class FsPathOutputStreamRunner extends AbstractFsPathRunner<FSDataOutputStream> { private final int bufferSize; FsPathOutputStreamRunner(Op op, Path fspath, int bufferSize, Param<?,?>... parameters) { super(op, fspath, parameters); this.bufferSize = bufferSize; } @Override FSDataOutputStream getResponse(final HttpURLConnection conn) throws IOException { return new FSDataOutputStream(new BufferedOutputStream( conn.getOutputStream(), bufferSize), statistics) { @Override public void close() throws IOException { try { super.close(); } finally { try { validateResponse(op, conn, true); } finally { conn.disconnect(); } } } }; } } class FsPathConnectionRunner extends AbstractFsPathRunner<HttpURLConnection> { FsPathConnectionRunner(Op op, Path fspath, Param<?,?>... parameters) { super(op, fspath, parameters); } @Override HttpURLConnection getResponse(final HttpURLConnection conn) throws IOException { return conn; } } /** * Used by open() which tracks the resolved url itself */ final class URLRunner extends AbstractRunner<HttpURLConnection> { private final URL url; @Override protected URL getUrl() { return url; } protected URLRunner(final HttpOpParam.Op op, final URL url, boolean redirected) { super(op, redirected); this.url = url; } @Override HttpURLConnection getResponse(HttpURLConnection conn) throws IOException { return conn; } } private FsPermission applyUMask(FsPermission permission) { if (permission == null) { permission = FsPermission.getDefault(); } return permission.applyUMask(FsPermission.getUMask(getConf())); } private HdfsFileStatus getHdfsFileStatus(Path f) throws IOException { final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS; HdfsFileStatus status = new FsPathResponseRunner<HdfsFileStatus>(op, f) { @Override HdfsFileStatus decodeResponse(Map<?,?> json) { return JsonUtilClient.toFileStatus(json, true); } }.run(); if (status == null) { throw new FileNotFoundException("File does not exist: " + f); } return status; } @Override public FileStatus getFileStatus(Path f) throws IOException { statistics.incrementReadOps(1); return makeQualified(getHdfsFileStatus(f), f); } private FileStatus makeQualified(HdfsFileStatus f, Path parent) { return new FileStatus(f.getLen(), f.isDir(), f.getReplication(), f.getBlockSize(), f.getModificationTime(), f.getAccessTime(), f.getPermission(), f.getOwner(), f.getGroup(), f.isSymlink() ? new Path(f.getSymlink()) : null, f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory())); } @Override public AclStatus getAclStatus(Path f) throws IOException { final HttpOpParam.Op op = GetOpParam.Op.GETACLSTATUS; AclStatus status = new FsPathResponseRunner<AclStatus>(op, f) { @Override AclStatus decodeResponse(Map<?,?> json) { return JsonUtilClient.toAclStatus(json); } }.run(); if (status == null) { throw new FileNotFoundException("File does not exist: " + f); } return status; } @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.MKDIRS; return new FsPathBooleanRunner(op, f, new PermissionParam(applyUMask(permission)) ).run(); } /** * Create a symlink pointing to the destination path. 
*/ public void createSymlink(Path destination, Path f, boolean createParent ) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.CREATESYMLINK; new FsPathRunner(op, f, new DestinationParam(makeQualified(destination).toUri().getPath()), new CreateParentParam(createParent) ).run(); } @Override public boolean rename(final Path src, final Path dst) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.RENAME; return new FsPathBooleanRunner(op, src, new DestinationParam(makeQualified(dst).toUri().getPath()) ).run(); } @SuppressWarnings("deprecation") @Override public void rename(final Path src, final Path dst, final Options.Rename... options) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.RENAME; new FsPathRunner(op, src, new DestinationParam(makeQualified(dst).toUri().getPath()), new RenameOptionSetParam(options) ).run(); } @Override public void setXAttr(Path p, String name, byte[] value, EnumSet<XAttrSetFlag> flag) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.SETXATTR; if (value != null) { new FsPathRunner(op, p, new XAttrNameParam(name), new XAttrValueParam( XAttrCodec.encodeValue(value, XAttrCodec.HEX)), new XAttrSetFlagParam(flag)).run(); } else { new FsPathRunner(op, p, new XAttrNameParam(name), new XAttrSetFlagParam(flag)).run(); } } @Override public byte[] getXAttr(Path p, final String name) throws IOException { final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS; return new FsPathResponseRunner<byte[]>(op, p, new XAttrNameParam(name), new XAttrEncodingParam(XAttrCodec.HEX)) { @Override byte[] decodeResponse(Map<?, ?> json) throws IOException { return JsonUtilClient.getXAttr(json); } }.run(); } @Override public Map<String, byte[]> getXAttrs(Path p) throws IOException { final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS; return new FsPathResponseRunner<Map<String, byte[]>>(op, p, new XAttrEncodingParam(XAttrCodec.HEX)) { @Override Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException { return JsonUtilClient.toXAttrs(json); } }.run(); } @Override public Map<String, byte[]> getXAttrs(Path p, final List<String> names) throws IOException { Preconditions.checkArgument(names != null && !names.isEmpty(), "XAttr names cannot be null or empty."); Param<?,?>[] parameters = new Param<?,?>[names.size() + 1]; for (int i = 0; i < parameters.length - 1; i++) { parameters[i] = new XAttrNameParam(names.get(i)); } parameters[parameters.length - 1] = new XAttrEncodingParam(XAttrCodec.HEX); final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS; return new FsPathResponseRunner<Map<String, byte[]>>(op, parameters, p) { @Override Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException { return JsonUtilClient.toXAttrs(json); } }.run(); } @Override public List<String> listXAttrs(Path p) throws IOException { final HttpOpParam.Op op = GetOpParam.Op.LISTXATTRS; return new FsPathResponseRunner<List<String>>(op, p) { @Override List<String> decodeResponse(Map<?, ?> json) throws IOException { return JsonUtilClient.toXAttrNames(json); } }.run(); } @Override public void removeXAttr(Path p, String name) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.REMOVEXATTR; new FsPathRunner(op, p, new XAttrNameParam(name)).run(); } @Override public void setOwner(final Path p, final String owner, final String group ) throws IOException { if (owner == null && group == null) { throw new 
IOException("owner == null && group == null"); } statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.SETOWNER; new FsPathRunner(op, p, new OwnerParam(owner), new GroupParam(group) ).run(); } @Override public void setPermission(final Path p, final FsPermission permission ) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.SETPERMISSION; new FsPathRunner(op, p,new PermissionParam(permission)).run(); } @Override public void modifyAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.MODIFYACLENTRIES; new FsPathRunner(op, path, new AclPermissionParam(aclSpec)).run(); } @Override public void removeAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.REMOVEACLENTRIES; new FsPathRunner(op, path, new AclPermissionParam(aclSpec)).run(); } @Override public void removeDefaultAcl(Path path) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.REMOVEDEFAULTACL; new FsPathRunner(op, path).run(); } @Override public void removeAcl(Path path) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.REMOVEACL; new FsPathRunner(op, path).run(); } @Override public void setAcl(final Path p, final List<AclEntry> aclSpec) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.SETACL; new FsPathRunner(op, p, new AclPermissionParam(aclSpec)).run(); } @Override public Path createSnapshot(final Path path, final String snapshotName) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.CREATESNAPSHOT; Path spath = new FsPathResponseRunner<Path>(op, path, new SnapshotNameParam(snapshotName)) { @Override Path decodeResponse(Map<?,?> json) { return new Path((String) json.get(Path.class.getSimpleName())); } }.run(); return spath; } @Override public void deleteSnapshot(final Path path, final String snapshotName) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = DeleteOpParam.Op.DELETESNAPSHOT; new FsPathRunner(op, path, new SnapshotNameParam(snapshotName)).run(); } @Override public void renameSnapshot(final Path path, final String snapshotOldName, final String snapshotNewName) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.RENAMESNAPSHOT; new FsPathRunner(op, path, new OldSnapshotNameParam(snapshotOldName), new SnapshotNameParam(snapshotNewName)).run(); } @Override public boolean setReplication(final Path p, final short replication ) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION; return new FsPathBooleanRunner(op, p, new ReplicationParam(replication) ).run(); } @Override public void setTimes(final Path p, final long mtime, final long atime ) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.SETTIMES; new FsPathRunner(op, p, new ModificationTimeParam(mtime), new AccessTimeParam(atime) ).run(); } @Override public long getDefaultBlockSize() { return getConf().getLongBytes(HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY, HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT); } @Override public short getDefaultReplication() { return (short)getConf().getInt(HdfsClientConfigKeys.DFS_REPLICATION_KEY, HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT); } @Override public void 
concat(final Path trg, final Path [] srcs) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PostOpParam.Op.CONCAT; new FsPathRunner(op, trg, new ConcatSourcesParam(srcs)).run(); } @Override public FSDataOutputStream create(final Path f, final FsPermission permission, final boolean overwrite, final int bufferSize, final short replication, final long blockSize, final Progressable progress) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PutOpParam.Op.CREATE; return new FsPathOutputStreamRunner(op, f, bufferSize, new PermissionParam(applyUMask(permission)), new OverwriteParam(overwrite), new BufferSizeParam(bufferSize), new ReplicationParam(replication), new BlockSizeParam(blockSize) ).run(); } @Override public FSDataOutputStream append(final Path f, final int bufferSize, final Progressable progress) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PostOpParam.Op.APPEND; return new FsPathOutputStreamRunner(op, f, bufferSize, new BufferSizeParam(bufferSize) ).run(); } @Override public boolean truncate(Path f, long newLength) throws IOException { statistics.incrementWriteOps(1); final HttpOpParam.Op op = PostOpParam.Op.TRUNCATE; return new FsPathBooleanRunner(op, f, new NewLengthParam(newLength)).run(); } @Override public boolean delete(Path f, boolean recursive) throws IOException { final HttpOpParam.Op op = DeleteOpParam.Op.DELETE; return new FsPathBooleanRunner(op, f, new RecursiveParam(recursive) ).run(); } @Override public FSDataInputStream open(final Path f, final int buffersize ) throws IOException { statistics.incrementReadOps(1); final HttpOpParam.Op op = GetOpParam.Op.OPEN; // use a runner so the open can recover from an invalid token FsPathConnectionRunner runner = new FsPathConnectionRunner(op, f, new BufferSizeParam(buffersize)); return new FSDataInputStream(new OffsetUrlInputStream( new UnresolvedUrlOpener(runner), new OffsetUrlOpener(null))); } @Override public synchronized void close() throws IOException { try { if (canRefreshDelegationToken && delegationToken != null) { cancelDelegationToken(delegationToken); } } catch (IOException ioe) { if (LOG.isDebugEnabled()) { LOG.debug("Token cancel failed: ", ioe); } } finally { super.close(); } } // use FsPathConnectionRunner to ensure retries for InvalidTokens class UnresolvedUrlOpener extends ByteRangeInputStream.URLOpener { private final FsPathConnectionRunner runner; UnresolvedUrlOpener(FsPathConnectionRunner runner) { super(null); this.runner = runner; } @Override protected HttpURLConnection connect(long offset, boolean resolved) throws IOException { assert offset == 0; HttpURLConnection conn = runner.run(); setURL(conn.getURL()); return conn; } } class OffsetUrlOpener extends ByteRangeInputStream.URLOpener { OffsetUrlOpener(final URL url) { super(url); } /** Setup offset url and connect. */ @Override protected HttpURLConnection connect(final long offset, final boolean resolved) throws IOException { final URL offsetUrl = offset == 0L? 
url : new URL(url + "&" + new OffsetParam(offset)); return new URLRunner(GetOpParam.Op.OPEN, offsetUrl, resolved).run(); } } private static final String OFFSET_PARAM_PREFIX = OffsetParam.NAME + "="; /** Remove offset parameter, if there is any, from the url */ static URL removeOffsetParam(final URL url) throws MalformedURLException { String query = url.getQuery(); if (query == null) { return url; } final String lower = StringUtils.toLowerCase(query); if (!lower.startsWith(OFFSET_PARAM_PREFIX) && !lower.contains("&" + OFFSET_PARAM_PREFIX)) { return url; } //rebuild query StringBuilder b = null; for(final StringTokenizer st = new StringTokenizer(query, "&"); st.hasMoreTokens();) { final String token = st.nextToken(); if (!StringUtils.toLowerCase(token).startsWith(OFFSET_PARAM_PREFIX)) { if (b == null) { b = new StringBuilder("?").append(token); } else { b.append('&').append(token); } } } query = b == null? "": b.toString(); final String urlStr = url.toString(); return new URL(urlStr.substring(0, urlStr.indexOf('?')) + query); } static class OffsetUrlInputStream extends ByteRangeInputStream { OffsetUrlInputStream(UnresolvedUrlOpener o, OffsetUrlOpener r) throws IOException { super(o, r); } /** Remove offset parameter before returning the resolved url. */ @Override protected URL getResolvedUrl(final HttpURLConnection connection ) throws MalformedURLException { return removeOffsetParam(connection.getURL()); } } @Override public FileStatus[] listStatus(final Path f) throws IOException { statistics.incrementReadOps(1); final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS; return new FsPathResponseRunner<FileStatus[]>(op, f) { @Override FileStatus[] decodeResponse(Map<?,?> json) { final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es"); final List<?> array = JsonUtilClient.getList(rootmap, FileStatus.class.getSimpleName()); //convert FileStatus final FileStatus[] statuses = new FileStatus[array.size()]; int i = 0; for (Object object : array) { final Map<?, ?> m = (Map<?, ?>) object; statuses[i++] = makeQualified(JsonUtilClient.toFileStatus(m, false), f); } return statuses; } }.run(); } @Override public Token<DelegationTokenIdentifier> getDelegationToken( final String renewer) throws IOException { final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN; Token<DelegationTokenIdentifier> token = new FsPathResponseRunner<Token<DelegationTokenIdentifier>>( op, null, new RenewerParam(renewer)) { @Override Token<DelegationTokenIdentifier> decodeResponse(Map<?,?> json) throws IOException { return JsonUtilClient.toDelegationToken(json); } }.run(); if (token != null) { token.setService(tokenServiceName); } else { if (disallowFallbackToInsecureCluster) { throw new AccessControlException(CANT_FALLBACK_TO_INSECURE_MSG); } } return token; } @Override public synchronized Token<?> getRenewToken() { return delegationToken; } @Override public <T extends TokenIdentifier> void setDelegationToken( final Token<T> token) { synchronized (this) { delegationToken = token; } } @Override public synchronized long renewDelegationToken(final Token<?> token ) throws IOException { final HttpOpParam.Op op = PutOpParam.Op.RENEWDELEGATIONTOKEN; return new FsPathResponseRunner<Long>(op, null, new TokenArgumentParam(token.encodeToUrlString())) { @Override Long decodeResponse(Map<?,?> json) throws IOException { return ((Number) json.get("long")).longValue(); } }.run(); } @Override public synchronized void cancelDelegationToken(final Token<?> token ) throws IOException { final HttpOpParam.Op op = 
PutOpParam.Op.CANCELDELEGATIONTOKEN; new FsPathRunner(op, null, new TokenArgumentParam(token.encodeToUrlString()) ).run(); } @Override public BlockLocation[] getFileBlockLocations(final FileStatus status, final long offset, final long length) throws IOException { if (status == null) { return null; } return getFileBlockLocations(status.getPath(), offset, length); } @Override public BlockLocation[] getFileBlockLocations(final Path p, final long offset, final long length) throws IOException { statistics.incrementReadOps(1); final HttpOpParam.Op op = GetOpParam.Op.GET_BLOCK_LOCATIONS; return new FsPathResponseRunner<BlockLocation[]>(op, p, new OffsetParam(offset), new LengthParam(length)) { @Override BlockLocation[] decodeResponse(Map<?,?> json) throws IOException { return DFSUtilClient.locatedBlocks2Locations( JsonUtilClient.toLocatedBlocks(json)); } }.run(); } @Override public void access(final Path path, final FsAction mode) throws IOException { final HttpOpParam.Op op = GetOpParam.Op.CHECKACCESS; new FsPathRunner(op, path, new FsActionParam(mode)).run(); } @Override public ContentSummary getContentSummary(final Path p) throws IOException { statistics.incrementReadOps(1); final HttpOpParam.Op op = GetOpParam.Op.GETCONTENTSUMMARY; return new FsPathResponseRunner<ContentSummary>(op, p) { @Override ContentSummary decodeResponse(Map<?,?> json) { return JsonUtilClient.toContentSummary(json); } }.run(); } @Override public MD5MD5CRC32FileChecksum getFileChecksum(final Path p ) throws IOException { statistics.incrementReadOps(1); final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM; return new FsPathResponseRunner<MD5MD5CRC32FileChecksum>(op, p) { @Override MD5MD5CRC32FileChecksum decodeResponse(Map<?,?> json) throws IOException { return JsonUtilClient.toMD5MD5CRC32FileChecksum(json); } }.run(); } /** * Resolve an HDFS URL into real InetSocketAddresses. It works like a DNS * resolver when the URL points to a non-HA cluster. When the URL points to * an HA cluster with its logical name, the resolver further resolves the * logical name (i.e., the authority in the URL) into real namenode addresses. */ private InetSocketAddress[] resolveNNAddr() throws IOException { Configuration conf = getConf(); final String scheme = uri.getScheme(); ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>(); if (!HAUtilClient.isLogicalUri(conf, uri)) { InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort()); ret.add(addr); } else { Map<String, Map<String, InetSocketAddress>> addresses = DFSUtilClient .getHaNnWebHdfsAddresses(conf, scheme); // Extract the entry corresponding to the logical name. Map<String, InetSocketAddress> addrs = addresses.get(uri.getHost()); for (InetSocketAddress addr : addrs.values()) { ret.add(addr); } } InetSocketAddress[] r = new InetSocketAddress[ret.size()]; return ret.toArray(r); } @Override public String getCanonicalServiceName() { return tokenServiceName == null ? super.getCanonicalServiceName() : tokenServiceName.toString(); } @VisibleForTesting InetSocketAddress[] getResolvedNNAddr() { return nnAddrs; } }
51,443
33.410702
98
java
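The WebHdfsFileSystem shown above is normally obtained through the generic FileSystem API rather than constructed directly. A minimal read sketch; the NameNode HTTP address and file path are placeholders:

```java
import java.io.InputStream;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsReadSketch {
  public static void main(String[] args) throws Exception {
    // "namenode.example.com:9870" and the file path are placeholders.
    Configuration conf = new Configuration();
    URI uri = URI.create("webhdfs://namenode.example.com:9870/");

    try (FileSystem fs = FileSystem.get(uri, conf);
         InputStream in = fs.open(new Path("/user/alice/data.txt"))) {
      // open() maps to the OPEN GET op above; positional reads go through
      // OffsetUrlInputStream, which appends an offset= parameter per range.
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) != -1) {
        System.out.write(buf, 0, n);
      }
      System.out.flush();
    }
  }
}
```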
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web; import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; import java.net.URLConnection; import java.security.GeneralSecurityException; import javax.net.ssl.HostnameVerifier; import javax.net.ssl.HttpsURLConnection; import javax.net.ssl.SSLSocketFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.ConnectionConfigurator; import org.apache.hadoop.security.ssl.SSLFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; /** * Utilities for handling URLs */ @InterfaceAudience.LimitedPrivate({ "HDFS" }) @InterfaceStability.Unstable public class URLConnectionFactory { private static final Logger LOG = LoggerFactory .getLogger(URLConnectionFactory.class); /** * Timeout for socket connects and reads */ public final static int DEFAULT_SOCKET_TIMEOUT = 1 * 60 * 1000; // 1 minute private final ConnectionConfigurator connConfigurator; private static final ConnectionConfigurator DEFAULT_TIMEOUT_CONN_CONFIGURATOR = new ConnectionConfigurator() { @Override public HttpURLConnection configure(HttpURLConnection conn) throws IOException { URLConnectionFactory.setTimeouts(conn, DEFAULT_SOCKET_TIMEOUT); return conn; } }; /** * The URLConnectionFactory that sets the default timeout and it only trusts * Java's SSL certificates. */ public static final URLConnectionFactory DEFAULT_SYSTEM_CONNECTION_FACTORY = new URLConnectionFactory( DEFAULT_TIMEOUT_CONN_CONFIGURATOR); /** * Construct a new URLConnectionFactory based on the configuration. It will * try to load SSL certificates when it is specified. */ public static URLConnectionFactory newDefaultURLConnectionFactory(Configuration conf) { ConnectionConfigurator conn = null; try { conn = newSslConnConfigurator(DEFAULT_SOCKET_TIMEOUT, conf); } catch (Exception e) { LOG.debug( "Cannot load customized ssl related configuration. 
Fallback to system-generic settings.", e); conn = DEFAULT_TIMEOUT_CONN_CONFIGURATOR; } return new URLConnectionFactory(conn); } @VisibleForTesting URLConnectionFactory(ConnectionConfigurator connConfigurator) { this.connConfigurator = connConfigurator; } /** * Create a new ConnectionConfigurator for SSL connections */ private static ConnectionConfigurator newSslConnConfigurator(final int timeout, Configuration conf) throws IOException, GeneralSecurityException { final SSLFactory factory; final SSLSocketFactory sf; final HostnameVerifier hv; factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf); factory.init(); sf = factory.createSSLSocketFactory(); hv = factory.getHostnameVerifier(); return new ConnectionConfigurator() { @Override public HttpURLConnection configure(HttpURLConnection conn) throws IOException { if (conn instanceof HttpsURLConnection) { HttpsURLConnection c = (HttpsURLConnection) conn; c.setSSLSocketFactory(sf); c.setHostnameVerifier(hv); } URLConnectionFactory.setTimeouts(conn, timeout); return conn; } }; } /** * Opens a url with read and connect timeouts * * @param url * to open * @return URLConnection * @throws IOException */ public URLConnection openConnection(URL url) throws IOException { try { return openConnection(url, false); } catch (AuthenticationException e) { // Unreachable return null; } } /** * Opens a url with read and connect timeouts * * @param url * URL to open * @param isSpnego * whether the url should be authenticated via SPNEGO * @return URLConnection * @throws IOException * @throws AuthenticationException */ public URLConnection openConnection(URL url, boolean isSpnego) throws IOException, AuthenticationException { if (isSpnego) { if (LOG.isDebugEnabled()) { LOG.debug("open AuthenticatedURL connection {}", url); } UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab(); final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token(); return new AuthenticatedURL(new KerberosUgiAuthenticator(), connConfigurator).openConnection(url, authToken); } else { LOG.debug("open URL connection"); URLConnection connection = url.openConnection(); if (connection instanceof HttpURLConnection) { connConfigurator.configure((HttpURLConnection) connection); } return connection; } } /** * Sets timeout parameters on the given URLConnection. * * @param connection * URLConnection to set * @param socketTimeout * the connection and read timeout of the connection. */ private static void setTimeouts(URLConnection connection, int socketTimeout) { connection.setConnectTimeout(socketTimeout); connection.setReadTimeout(socketTimeout); } }
6,306
32.727273
112
java
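A short sketch of how the factory above might be used to open a WebHDFS REST URL. The host, port, and path are placeholders, and the class is marked LimitedPrivate to HDFS, so treat this as illustrative rather than a supported public API:

```java
import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;

public class UrlConnectionFactorySketch {
  public static void main(String[] args) throws Exception {
    // Builds an SSL-aware configurator when possible, otherwise falls back
    // to the default timeout-only configurator, as shown above.
    URLConnectionFactory factory =
        URLConnectionFactory.newDefaultURLConnectionFactory(new Configuration());

    // Placeholder endpoint; any WebHDFS REST URL would do.
    URL url = new URL("http://namenode.example.com:9870/webhdfs/v1/?op=GETHOMEDIRECTORY");
    HttpURLConnection conn = (HttpURLConnection) factory.openConnection(url);
    try {
      // The one-minute connect/read timeouts have already been applied.
      System.out.println("HTTP " + conn.getResponseCode());
    } finally {
      conn.disconnect();
    }
  }
}
```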
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/KerberosUgiAuthenticator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web; import java.io.IOException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.Authenticator; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; import org.apache.hadoop.security.authentication.client.PseudoAuthenticator; /** * Use UserGroupInformation as a fallback authenticator * if the server does not use Kerberos SPNEGO HTTP authentication. */ public class KerberosUgiAuthenticator extends KerberosAuthenticator { @Override protected Authenticator getFallBackAuthenticator() { return new PseudoAuthenticator() { @Override protected String getUserName() { try { return UserGroupInformation.getLoginUser().getUserName(); } catch (IOException e) { throw new SecurityException("Failed to obtain current username", e); } } }; } }
1,745
36.956522
78
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.io.Text; import com.google.common.annotations.VisibleForTesting; public class SWebHdfsFileSystem extends WebHdfsFileSystem { @Override public String getScheme() { return WebHdfsConstants.SWEBHDFS_SCHEME; } @Override protected String getTransportScheme() { return "https"; } @Override protected Text getTokenKind() { return WebHdfsConstants.SWEBHDFS_TOKEN_KIND; } @VisibleForTesting @Override public int getDefaultPort() { return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT); } }
1,539
30.428571
77
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BooleanParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; /** Boolean parameter. */ abstract class BooleanParam extends Param<Boolean, BooleanParam.Domain> { static final String TRUE = "true"; static final String FALSE = "false"; /** @return the parameter value as a string */ @Override public String getValueString() { return value.toString(); } BooleanParam(final Domain domain, final Boolean value) { super(domain, value); } /** The domain of the parameter. */ static final class Domain extends Param.Domain<Boolean> { Domain(final String paramName) { super(paramName); } @Override public String getDomain() { return "<" + NULL + " | boolean>"; } @Override Boolean parse(final String str) { if (TRUE.equalsIgnoreCase(str)) { return true; } else if (FALSE.equalsIgnoreCase(str)) { return false; } throw new IllegalArgumentException("Failed to parse \"" + str + "\" to Boolean."); } } }
1,813
30.275862
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; import java.net.HttpURLConnection; /** Http PUT operation parameter. */ public class PutOpParam extends HttpOpParam<PutOpParam.Op> { /** Put operations. */ public static enum Op implements HttpOpParam.Op { CREATE(true, HttpURLConnection.HTTP_CREATED), MKDIRS(false, HttpURLConnection.HTTP_OK), CREATESYMLINK(false, HttpURLConnection.HTTP_OK), RENAME(false, HttpURLConnection.HTTP_OK), SETREPLICATION(false, HttpURLConnection.HTTP_OK), SETOWNER(false, HttpURLConnection.HTTP_OK), SETPERMISSION(false, HttpURLConnection.HTTP_OK), SETTIMES(false, HttpURLConnection.HTTP_OK), RENEWDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true), CANCELDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true), MODIFYACLENTRIES(false, HttpURLConnection.HTTP_OK), REMOVEACLENTRIES(false, HttpURLConnection.HTTP_OK), REMOVEDEFAULTACL(false, HttpURLConnection.HTTP_OK), REMOVEACL(false, HttpURLConnection.HTTP_OK), SETACL(false, HttpURLConnection.HTTP_OK), SETXATTR(false, HttpURLConnection.HTTP_OK), REMOVEXATTR(false, HttpURLConnection.HTTP_OK), CREATESNAPSHOT(false, HttpURLConnection.HTTP_OK), RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK), NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED); final boolean doOutputAndRedirect; final int expectedHttpResponseCode; final boolean requireAuth; Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode) { this(doOutputAndRedirect, expectedHttpResponseCode, false); } Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode, final boolean requireAuth) { this.doOutputAndRedirect = doOutputAndRedirect; this.expectedHttpResponseCode = expectedHttpResponseCode; this.requireAuth = requireAuth; } @Override public HttpOpParam.Type getType() { return HttpOpParam.Type.PUT; } @Override public boolean getRequireAuth() { return requireAuth; } @Override public boolean getDoOutput() { return doOutputAndRedirect; } @Override public boolean getRedirect() { return doOutputAndRedirect; } @Override public int getExpectedHttpResponseCode() { return expectedHttpResponseCode; } @Override public String toQueryString() { return NAME + "=" + this; } } private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class); /** * Constructor. * @param str a string representation of the parameter value. */ public PutOpParam(final String str) { super(DOMAIN, DOMAIN.parse(str)); } @Override public String getName() { return NAME; } }
3,549
30.140351
79
java
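To illustrate how the op parameter above is parsed and rendered, a small sketch; the getValue() accessor comes from the Param base class (not shown here), so that call is an assumption about the surrounding plumbing:

```java
import java.net.HttpURLConnection;

import org.apache.hadoop.hdfs.web.resources.PutOpParam;

public class PutOpParamSketch {
  public static void main(String[] args) {
    // Parse the string form, as the server side does for "?op=CREATE".
    PutOpParam param = new PutOpParam("CREATE");
    PutOpParam.Op op = param.getValue();   // assumed public accessor on Param

    System.out.println(op.toQueryString());      // op=CREATE
    System.out.println(op.getDoOutput());        // true: CREATE sends a request body
    System.out.println(op.getExpectedHttpResponseCode()
        == HttpURLConnection.HTTP_CREATED);      // true
  }
}
```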
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; import java.util.EnumSet; import org.apache.hadoop.fs.XAttrSetFlag; public class XAttrSetFlagParam extends EnumSetParam<XAttrSetFlag> { /** Parameter name. */ public static final String NAME = "flag"; /** Default parameter value. */ public static final String DEFAULT = ""; private static final Domain<XAttrSetFlag> DOMAIN = new Domain<XAttrSetFlag>( NAME, XAttrSetFlag.class); public XAttrSetFlagParam(final EnumSet<XAttrSetFlag> flag) { super(DOMAIN, flag); } /** * Constructor. * @param str a string representation of the parameter value. */ public XAttrSetFlagParam(final String str) { super(DOMAIN, DOMAIN.parse(str)); } @Override public String getName() { return NAME; } public EnumSet<XAttrSetFlag> getFlag() { return getValue(); } }
1,663
29.814815
78
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; /** Offset parameter. */ public class OffsetParam extends LongParam { /** Parameter name. */ public static final String NAME = "offset"; /** Default parameter value. */ public static final String DEFAULT = "0"; private static final Domain DOMAIN = new Domain(NAME); /** * Constructor. * @param value the parameter value. */ public OffsetParam(final Long value) { super(DOMAIN, value, 0L, null); } /** * Constructor. * @param str a string representation of the parameter value. */ public OffsetParam(final String str) { this(DOMAIN.parse(str)); } @Override public String getName() { return NAME; } public Long getOffset() { Long offset = getValue(); return (offset == null) ? Long.valueOf(0) : offset; } }
1,633
29.259259
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; /** Owner parameter. */ public class OwnerParam extends StringParam { /** Parameter name. */ public static final String NAME = "owner"; /** Default parameter value. */ public static final String DEFAULT = ""; private static final Domain DOMAIN = new Domain(NAME, null); /** * Constructor. * @param str a string representation of the parameter value. */ public OwnerParam(final String str) { super(DOMAIN, str == null || str.equals(DEFAULT)? null: str); } @Override public String getName() { return NAME; } }
1,403
33.243902
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; /** Create Parent parameter. */ public class CreateParentParam extends BooleanParam { /** Parameter name. */ public static final String NAME = "createparent"; /** Default parameter value. */ public static final String DEFAULT = FALSE; private static final Domain DOMAIN = new Domain(NAME); /** * Constructor. * @param value the parameter value. */ public CreateParentParam(final Boolean value) { super(DOMAIN, value); } /** * Constructor. * @param str a string representation of the parameter value. */ public CreateParentParam(final String str) { this(DOMAIN.parse(str)); } @Override public String getName() { return NAME; } }
1,543
30.510204
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; import java.util.Arrays; import java.util.Collections; import java.util.List; import javax.ws.rs.core.Response; /** Http operation parameter. */ public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op> extends EnumParam<E> { /** Parameter name. */ public static final String NAME = "op"; /** Default parameter value. */ public static final String DEFAULT = NULL; /** Http operation types. */ public static enum Type { GET, PUT, POST, DELETE; } /** Http operation interface. */ public static interface Op { /** @return the Http operation type. */ public Type getType(); /** @return true if the operation cannot use a token */ public boolean getRequireAuth(); /** @return true if the operation will do output. */ public boolean getDoOutput(); /** @return true if the operation will be redirected. */ public boolean getRedirect(); /** @return the expected http response code. */ public int getExpectedHttpResponseCode(); /** @return a URI query string. */ public String toQueryString(); } /** Expects HTTP response 307 "Temporary Redirect". */ public static class TemporaryRedirectOp implements Op { static final TemporaryRedirectOp CREATE = new TemporaryRedirectOp( PutOpParam.Op.CREATE); static final TemporaryRedirectOp APPEND = new TemporaryRedirectOp( PostOpParam.Op.APPEND); static final TemporaryRedirectOp OPEN = new TemporaryRedirectOp( GetOpParam.Op.OPEN); static final TemporaryRedirectOp GETFILECHECKSUM = new TemporaryRedirectOp( GetOpParam.Op.GETFILECHECKSUM); static final List<TemporaryRedirectOp> values = Collections.unmodifiableList(Arrays.asList(CREATE, APPEND, OPEN, GETFILECHECKSUM)); /** Get an object for the given op. */ public static TemporaryRedirectOp valueOf(final Op op) { for(TemporaryRedirectOp t : values) { if (op == t.op) { return t; } } throw new IllegalArgumentException(op + " not found."); } private final Op op; private TemporaryRedirectOp(final Op op) { this.op = op; } @Override public Type getType() { return op.getType(); } @Override public boolean getRequireAuth() { return op.getRequireAuth(); } @Override public boolean getDoOutput() { return false; } @Override public boolean getRedirect() { return false; } /** Override the original expected response with "Temporary Redirect". */ @Override public int getExpectedHttpResponseCode() { return Response.Status.TEMPORARY_REDIRECT.getStatusCode(); } @Override public String toQueryString() { return op.toQueryString(); } } /** @return the parameter value as a string */ @Override public String getValueString() { return value.toString(); } HttpOpParam(final Domain<E> domain, final E value) { super(domain, value); } }
3,891
28.044776
79
java
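The TemporaryRedirectOp wrapper above is what the two-step create/append/open handshake validates against; a brief sketch of the 307 override it provides:

```java
import javax.ws.rs.core.Response;

import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;

public class TemporaryRedirectSketch {
  public static void main(String[] args) {
    // Wrap the real CREATE op: type and query string are delegated,
    // but the expected status code becomes 307 Temporary Redirect.
    HttpOpParam.Op redirect =
        HttpOpParam.TemporaryRedirectOp.valueOf(PutOpParam.Op.CREATE);

    System.out.println(redirect.getType());                      // PUT
    System.out.println(redirect.toQueryString());                // op=CREATE
    System.out.println(redirect.getExpectedHttpResponseCode()
        == Response.Status.TEMPORARY_REDIRECT.getStatusCode());  // true
  }
}
```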
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; import java.net.HttpURLConnection; /** Http DELETE operation parameter. */ public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> { /** Delete operations. */ public static enum Op implements HttpOpParam.Op { DELETE(HttpURLConnection.HTTP_OK), DELETESNAPSHOT(HttpURLConnection.HTTP_OK), NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED); final int expectedHttpResponseCode; Op(final int expectedHttpResponseCode) { this.expectedHttpResponseCode = expectedHttpResponseCode; } @Override public HttpOpParam.Type getType() { return HttpOpParam.Type.DELETE; } @Override public boolean getRequireAuth() { return false; } @Override public boolean getDoOutput() { return false; } @Override public boolean getRedirect() { return false; } @Override public int getExpectedHttpResponseCode() { return expectedHttpResponseCode; } @Override public String toQueryString() { return NAME + "=" + this; } } private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class); /** * Constructor. * @param str a string representation of the parameter value. */ public DeleteOpParam(final String str) { super(DOMAIN, DOMAIN.parse(str)); } @Override public String getName() { return NAME; } }
2,220
26.085366
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; import java.net.HttpURLConnection; /** Http GET operation parameter. */ public class GetOpParam extends HttpOpParam<GetOpParam.Op> { /** Get operations. */ public static enum Op implements HttpOpParam.Op { OPEN(true, HttpURLConnection.HTTP_OK), GETFILESTATUS(false, HttpURLConnection.HTTP_OK), LISTSTATUS(false, HttpURLConnection.HTTP_OK), GETCONTENTSUMMARY(false, HttpURLConnection.HTTP_OK), GETFILECHECKSUM(true, HttpURLConnection.HTTP_OK), GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK), GETDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true), /** GET_BLOCK_LOCATIONS is a private unstable op. */ GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK), GETACLSTATUS(false, HttpURLConnection.HTTP_OK), GETXATTRS(false, HttpURLConnection.HTTP_OK), LISTXATTRS(false, HttpURLConnection.HTTP_OK), NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED), CHECKACCESS(false, HttpURLConnection.HTTP_OK); final boolean redirect; final int expectedHttpResponseCode; final boolean requireAuth; Op(final boolean redirect, final int expectedHttpResponseCode) { this(redirect, expectedHttpResponseCode, false); } Op(final boolean redirect, final int expectedHttpResponseCode, final boolean requireAuth) { this.redirect = redirect; this.expectedHttpResponseCode = expectedHttpResponseCode; this.requireAuth = requireAuth; } @Override public HttpOpParam.Type getType() { return HttpOpParam.Type.GET; } @Override public boolean getRequireAuth() { return requireAuth; } @Override public boolean getDoOutput() { return false; } @Override public boolean getRedirect() { return redirect; } @Override public int getExpectedHttpResponseCode() { return expectedHttpResponseCode; } @Override public String toQueryString() { return NAME + "=" + this; } } private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class); /** * Constructor. * @param str a string representation of the parameter value. */ public GetOpParam(final String str) { super(DOMAIN, DOMAIN.parse(str)); } @Override public String getName() { return NAME; } }
3,149
28.716981
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; /** Modification time parameter. */ public class ModificationTimeParam extends LongParam { /** Parameter name. */ public static final String NAME = "modificationtime"; /** Default parameter value. */ public static final String DEFAULT = "-1"; private static final Domain DOMAIN = new Domain(NAME); /** * Constructor. * @param value the parameter value. */ public ModificationTimeParam(final Long value) { super(DOMAIN, value, -1L, null); } /** * Constructor. * @param str a string representation of the parameter value. */ public ModificationTimeParam(final String str) { this(DOMAIN.parse(str)); } @Override public String getName() { return NAME; } }
1,567
31
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; import org.apache.hadoop.fs.permission.FsPermission; /** Permission parameter, use a Short to represent a FsPermission. */ public class PermissionParam extends ShortParam { /** Parameter name. */ public static final String NAME = "permission"; /** Default parameter value. */ public static final String DEFAULT = NULL; private static final Domain DOMAIN = new Domain(NAME, 8); private static final short DEFAULT_PERMISSION = 0755; /** @return the default FsPermission. */ public static FsPermission getDefaultFsPermission() { return new FsPermission(DEFAULT_PERMISSION); } /** * Constructor. * @param value the parameter value. */ public PermissionParam(final FsPermission value) { super(DOMAIN, value == null? null: value.toShort(), null, null); } /** * Constructor. * @param str a string representation of the parameter value. */ public PermissionParam(final String str) { super(DOMAIN, DOMAIN.parse(str), (short)0, (short)01777); } @Override public String getName() { return NAME; } /** @return the represented FsPermission. */ public FsPermission getFsPermission() { final Short v = getValue(); return new FsPermission(v != null? v: DEFAULT_PERMISSION); } }
2,107
31.9375
75
java
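Since the Domain above is constructed with radix 8, the string form of the permission is octal. A small sketch of parsing and of the 0755 fallback when no permission is supplied:

```java
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.web.resources.PermissionParam;

public class PermissionParamSketch {
  public static void main(String[] args) {
    // The query-string form is octal (radix 8), so "644" means 0644.
    PermissionParam fromQuery = new PermissionParam("644");
    FsPermission perm = fromQuery.getFsPermission();
    System.out.println(perm);                        // rw-r--r--

    // Omitting the parameter falls back to the 0755 default.
    PermissionParam absent = new PermissionParam((FsPermission) null);
    System.out.println(absent.getFsPermission());    // rwxr-xr-x
  }
}
```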
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; /** Overwrite parameter. */ public class OverwriteParam extends BooleanParam { /** Parameter name. */ public static final String NAME = "overwrite"; /** Default parameter value. */ public static final String DEFAULT = FALSE; private static final Domain DOMAIN = new Domain(NAME); /** * Constructor. * @param value the parameter value. */ public OverwriteParam(final Boolean value) { super(DOMAIN, value); } /** * Constructor. * @param str a string representation of the parameter value. */ public OverwriteParam(final String str) { this(DOMAIN.parse(str)); } @Override public String getName() { return NAME; } }
1,527
30.183673
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; /** Buffer size parameter. */ public class BufferSizeParam extends IntegerParam { /** Parameter name. */ public static final String NAME = "buffersize"; /** Default parameter value. */ public static final String DEFAULT = NULL; private static final Domain DOMAIN = new Domain(NAME); /** * Constructor. * @param value the parameter value. */ public BufferSizeParam(final Integer value) { super(DOMAIN, value, 1, null); } /** * Constructor. * @param str a string representation of the parameter value. */ public BufferSizeParam(final String str) { this(DOMAIN.parse(str)); } @Override public String getName() { return NAME; } /** @return the value or, if it is null, return the default from conf. */ public int getValue(final Configuration conf) { return getValue() != null? getValue() : conf.getInt( CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT); } }
1,981
32.033333
75
java
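The getValue(Configuration) fallback above is the interesting part of this parameter; a minimal sketch of an explicit value versus the fall-through to io.file.buffer.size:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;

public class BufferSizeParamSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, 8192);

    // An explicit buffer size wins over the configuration...
    System.out.println(new BufferSizeParam(65536).getValue(conf));   // 65536

    // ...while the "null" default falls back to io.file.buffer.size.
    System.out.println(
        new BufferSizeParam(BufferSizeParam.DEFAULT).getValue(conf)); // 8192
  }
}
```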
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DestinationParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; import org.apache.hadoop.fs.Path; /** Destination path parameter. */ public class DestinationParam extends StringParam { /** Parameter name. */ public static final String NAME = "destination"; /** Default parameter value. */ public static final String DEFAULT = ""; private static final Domain DOMAIN = new Domain(NAME, null); private static String validate(final String str) { if (str == null || str.equals(DEFAULT)) { return null; } if (!str.startsWith(Path.SEPARATOR)) { throw new IllegalArgumentException("Invalid parameter value: " + NAME + " = \"" + str + "\" is not an absolute path."); } return new Path(str).toUri().getPath(); } /** * Constructor. * @param str a string representation of the parameter value. */ public DestinationParam(final String str) { super(DOMAIN, validate(str)); } @Override public String getName() { return NAME; } }
1,794
31.636364
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; /** Access time parameter. */ public class AccessTimeParam extends LongParam { /** Parameter name. */ public static final String NAME = "accesstime"; /** Default parameter value. */ public static final String DEFAULT = "-1"; private static final Domain DOMAIN = new Domain(NAME); /** * Constructor. * @param value the parameter value. */ public AccessTimeParam(final Long value) { super(DOMAIN, value, -1L, null); } /** * Constructor. * @param str a string representation of the parameter value. */ public AccessTimeParam(final String str) { this(DOMAIN.parse(str)); } @Override public String getName() { return NAME; } }
1,537
30.387755
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; import java.io.IOException; import org.apache.hadoop.fs.XAttrCodec; public class XAttrValueParam extends StringParam { /** Parameter name. **/ public static final String NAME = "xattr.value"; /** Default parameter value. **/ public static final String DEFAULT = ""; private static Domain DOMAIN = new Domain(NAME, null); public XAttrValueParam(final String str) { super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str); } @Override public String getName() { return NAME; } public byte[] getXAttrValue() throws IOException { final String v = getValue(); return XAttrCodec.decodeValue(v); } }
1,499
31.608696
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; import org.apache.hadoop.fs.Path; /** The concat source paths parameter. */ public class ConcatSourcesParam extends StringParam { /** Parameter name. */ public static final String NAME = "sources"; /** Default parameter value. */ public static final String DEFAULT = ""; private static final Domain DOMAIN = new Domain(NAME, null); private static String paths2String(Path[] paths) { if (paths == null || paths.length == 0) { return ""; } final StringBuilder b = new StringBuilder(paths[0].toUri().getPath()); for(int i = 1; i < paths.length; i++) { b.append(',').append(paths[i].toUri().getPath()); } return b.toString(); } /** * Constructor. * @param str a string representation of the parameter value. */ public ConcatSourcesParam(String str) { super(DOMAIN, str); } public ConcatSourcesParam(Path[] paths) { this(paths2String(paths)); } @Override public String getName() { return NAME; } /** @return the absolute source paths. */ public final String[] getAbsolutePaths() { final String[] paths = getValue().split(","); return paths; } }
1,962
28.742424
75
java
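A round-trip sketch for the parameter above: the Path[] constructor joins absolute paths with commas, and getAbsolutePaths() splits them back out. The paths used here are placeholders:

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam;

public class ConcatSourcesParamSketch {
  public static void main(String[] args) {
    // Build the comma-joined "sources" value from two placeholder paths.
    ConcatSourcesParam param = new ConcatSourcesParam(new Path[] {
        new Path("/user/alice/part-0"),
        new Path("/user/alice/part-1")});

    // The server side recovers the individual absolute paths.
    for (String p : param.getAbsolutePaths()) {
      System.out.println(p);   // /user/alice/part-0, then /user/alice/part-1
    }
  }
}
```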
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; import org.apache.hadoop.fs.permission.FsAction; import java.util.regex.Pattern; /** {@link FsAction} Parameter */ public class FsActionParam extends StringParam { /** Parameter name. */ public static final String NAME = "fsaction"; /** Default parameter value. */ public static final String DEFAULT = NULL; private static String FS_ACTION_PATTERN = "[r-][w-][x-]"; private static final Domain DOMAIN = new Domain(NAME, Pattern.compile(FS_ACTION_PATTERN)); /** * Constructor. * @param str a string representation of the parameter value. */ public FsActionParam(final String str) { super(DOMAIN, str == null || str.equals(DEFAULT)? null: str); } /** * Constructor. * @param value the parameter value. */ public FsActionParam(final FsAction value) { super(DOMAIN, value == null? null: value.SYMBOL); } @Override public String getName() { return NAME; } }
1,782
29.220339
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_REPLICATION_KEY; import org.apache.hadoop.conf.Configuration; /** Replication parameter. */ public class ReplicationParam extends ShortParam { /** Parameter name. */ public static final String NAME = "replication"; /** Default parameter value. */ public static final String DEFAULT = NULL; private static final Domain DOMAIN = new Domain(NAME); /** * Constructor. * @param value the parameter value. */ public ReplicationParam(final Short value) { super(DOMAIN, value, (short)1, null); } /** * Constructor. * @param str a string representation of the parameter value. */ public ReplicationParam(final String str) { this(DOMAIN.parse(str)); } @Override public String getName() { return NAME; } /** @return the value or, if it is null, return the default from conf. */ public short getValue(final Configuration conf) { return getValue() != null? getValue() : (short)conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT); } }
2,022
32.716667
89
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; /** NewLength parameter. */ public class NewLengthParam extends LongParam { /** Parameter name. */ public static final String NAME = "newlength"; /** Default parameter value. */ public static final String DEFAULT = NULL; private static final Domain DOMAIN = new Domain(NAME); /** * Constructor. * @param value the parameter value. */ public NewLengthParam(final Long value) { super(DOMAIN, value, 0L, null); } /** * Constructor. * @param str a string representation of the parameter value. */ public NewLengthParam(final String str) { this(DOMAIN.parse(str)); } @Override public String getName() { return NAME; } }
1,530
30.244898
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; /** Long parameter. */ abstract class LongParam extends Param<Long, LongParam.Domain> { LongParam(final Domain domain, final Long value, final Long min, final Long max) { super(domain, value); checkRange(min, max); } private void checkRange(final Long min, final Long max) { if (value == null) { return; } if (min != null && value < min) { throw new IllegalArgumentException("Invalid parameter range: " + getName() + " = " + domain.toString(value) + " < " + domain.toString(min)); } if (max != null && value > max) { throw new IllegalArgumentException("Invalid parameter range: " + getName() + " = " + domain.toString(value) + " > " + domain.toString(max)); } } @Override public String toString() { return getName() + "=" + domain.toString(getValue()); } /** @return the parameter value as a string */ @Override public String getValueString() { return domain.toString(getValue()); } /** The domain of the parameter. */ static final class Domain extends Param.Domain<Long> { /** The radix of the number. */ final int radix; Domain(final String paramName) { this(paramName, 10); } Domain(final String paramName, final int radix) { super(paramName); this.radix = radix; } @Override public String getDomain() { return "<" + NULL + " | long in radix " + radix + ">"; } @Override Long parse(final String str) { try { return NULL.equals(str) || str == null ? null: Long.parseLong(str, radix); } catch(NumberFormatException e) { throw new IllegalArgumentException("Failed to parse \"" + str + "\" as a radix-" + radix + " long integer.", e); } } /** Convert a Long to a String. */ String toString(final Long n) { return n == null? NULL: Long.toString(n, radix); } } }
2,775
30.545455
84
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.web.resources; import java.net.HttpURLConnection; /** Http POST operation parameter. */ public class PostOpParam extends HttpOpParam<PostOpParam.Op> { /** Post operations. */ public static enum Op implements HttpOpParam.Op { APPEND(true, HttpURLConnection.HTTP_OK), CONCAT(false, HttpURLConnection.HTTP_OK), TRUNCATE(false, HttpURLConnection.HTTP_OK), NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED); final boolean doOutputAndRedirect; final int expectedHttpResponseCode; Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode) { this.doOutputAndRedirect = doOutputAndRedirect; this.expectedHttpResponseCode = expectedHttpResponseCode; } @Override public Type getType() { return Type.POST; } @Override public boolean getRequireAuth() { return false; } @Override public boolean getDoOutput() { return doOutputAndRedirect; } @Override public boolean getRedirect() { return doOutputAndRedirect; } @Override public int getExpectedHttpResponseCode() { return expectedHttpResponseCode; } /** @return a URI query string. */ @Override public String toQueryString() { return NAME + "=" + this; } } private static final Domain<Op> DOMAIN = new Domain<PostOpParam.Op>(NAME, Op.class); /** * Constructor. * @param str a string representation of the parameter value. */ public PostOpParam(final String str) { super(DOMAIN, DOMAIN.parse(str)); } @Override public String getName() { return NAME; } }
2,453
26.886364
86
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY;

import org.apache.hadoop.conf.Configuration;

/** Block size parameter. */
public class BlockSizeParam extends LongParam {
  /** Parameter name. */
  public static final String NAME = "blocksize";
  /** Default parameter value. */
  public static final String DEFAULT = NULL;

  private static final Domain DOMAIN = new Domain(NAME);

  /**
   * Constructor.
   * @param value the parameter value.
   */
  public BlockSizeParam(final Long value) {
    super(DOMAIN, value, 1L, null);
  }

  /**
   * Constructor.
   * @param str a string representation of the parameter value.
   */
  public BlockSizeParam(final String str) {
    this(DOMAIN.parse(str));
  }

  @Override
  public String getName() {
    return NAME;
  }

  /** @return the value or, if it is null, return the default from conf. */
  public long getValue(final Configuration conf) {
    return getValue() != null ? getValue()
        : conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
  }
}
1,999
32.333333
88
java
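A minimal usage sketch, not part of the dataset row above, showing how LongParam subclasses such as BlockSizeParam enforce their range at construction time and defer null values to the configuration default. The expected message in the comment is inferred from the code shown above rather than captured from a run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;

public class BlockSizeParamSketch {
  public static void main(String[] args) {
    // A null value is accepted and deferred to the dfs.blocksize default.
    BlockSizeParam unset = new BlockSizeParam((Long) null);
    System.out.println(unset.getValue(new Configuration()));

    // Values below the minimum (1) fail fast at construction time.
    try {
      new BlockSizeParam(0L);
    } catch (IllegalArgumentException e) {
      // Expected: "Invalid parameter range: blocksize = 0 < 1"
      System.out.println(e.getMessage());
    }
  }
}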
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import java.util.regex.Pattern;

public class XAttrNameParam extends StringParam {
  /** Parameter name. **/
  public static final String NAME = "xattr.name";
  /** Default parameter value. **/
  public static final String DEFAULT = "";

  private static Domain DOMAIN = new Domain(NAME, Pattern.compile(".*"));

  public XAttrNameParam(final String str) {
    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
  }

  @Override
  public String getName() {
    return NAME;
  }

  public String getXAttrName() {
    final String v = getValue();
    return v;
  }
}
1,438
30.977778
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OldSnapshotNameParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

/**
 * The old snapshot name parameter for renameSnapshot operation.
 */
public class OldSnapshotNameParam extends StringParam {
  /** Parameter name. */
  public static final String NAME = "oldsnapshotname";

  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static final Domain DOMAIN = new Domain(NAME, null);

  public OldSnapshotNameParam(final String str) {
    super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
  }

  @Override
  public String getName() {
    return NAME;
  }
}
1,393
33
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import java.util.Arrays;

import org.apache.hadoop.util.StringUtils;

abstract class EnumParam<E extends Enum<E>> extends Param<E, EnumParam.Domain<E>> {
  EnumParam(final Domain<E> domain, final E value) {
    super(domain, value);
  }

  /** The domain of the parameter. */
  static final class Domain<E extends Enum<E>> extends Param.Domain<E> {
    private final Class<E> enumClass;

    Domain(String name, final Class<E> enumClass) {
      super(name);
      this.enumClass = enumClass;
    }

    @Override
    public final String getDomain() {
      return Arrays.asList(enumClass.getEnumConstants()).toString();
    }

    @Override
    final E parse(final String str) {
      return Enum.valueOf(enumClass, StringUtils.toUpperCase(str));
    }
  }
}
1,613
32.625
83
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

/** Renewer parameter. */
public class RenewerParam extends StringParam {
  /** Parameter name. */
  public static final String NAME = "renewer";
  /** Default parameter value. */
  public static final String DEFAULT = NULL;

  private static final Domain DOMAIN = new Domain(NAME, null);

  /**
   * Constructor.
   * @param str a string representation of the parameter value.
   */
  public RenewerParam(final String str) {
    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
  }

  @Override
  public String getName() {
    return NAME;
  }
}
1,413
33.487805
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys
    .DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;

import java.util.List;
import java.util.regex.Pattern;

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.commons.lang.StringUtils;

/** AclPermission parameter. */
public class AclPermissionParam extends StringParam {
  /** Parameter name. */
  public static final String NAME = "aclspec";
  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static final Domain DOMAIN = new Domain(NAME,
      Pattern.compile(DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));

  /**
   * Constructor.
   *
   * @param str a string representation of the parameter value.
   */
  public AclPermissionParam(final String str) {
    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
  }

  public AclPermissionParam(List<AclEntry> acl) {
    super(DOMAIN, parseAclSpec(acl).equals(DEFAULT) ? null : parseAclSpec(acl));
  }

  @Override
  public String getName() {
    return NAME;
  }

  public List<AclEntry> getAclPermission(boolean includePermission) {
    final String v = getValue();
    return (v != null ? AclEntry.parseAclSpec(v, includePermission) : AclEntry
        .parseAclSpec(DEFAULT, includePermission));
  }

  /**
   * Parse the given {@code aclEntry} list into an aclspec string.
   * @return the comma separated aclspec
   */
  private static String parseAclSpec(List<AclEntry> aclEntry) {
    return StringUtils.join(aclEntry, ",");
  }
}
2,316
32.57971
79
java
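A brief, hypothetical sketch of round-tripping an ACL spec through AclPermissionParam; the spec string below is an assumed example of the syntax accepted by the default WebHDFS ACL pattern, not taken from the sources above.

import java.util.List;

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.hdfs.web.resources.AclPermissionParam;

public class AclPermissionParamSketch {
  public static void main(String[] args) {
    // Assumed example spec: one named user entry and an unnamed group entry.
    AclPermissionParam p = new AclPermissionParam("user:alice:rw-,group::r--");
    // Parse back into AclEntry objects, keeping the permission bits.
    List<AclEntry> entries = p.getAclPermission(true);
    System.out.println(entries);
  }
}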
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

/** DoAs parameter for proxy user. */
public class DoAsParam extends StringParam {
  /** Parameter name. */
  public static final String NAME = "doas";
  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static final Domain DOMAIN = new Domain(NAME, null);

  /**
   * Constructor.
   * @param str a string representation of the parameter value.
   */
  public DoAsParam(final String str) {
    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
  }

  @Override
  public String getName() {
    return NAME;
  }
}
1,414
33.512195
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT;

import org.apache.hadoop.security.UserGroupInformation;
import com.google.common.annotations.VisibleForTesting;

import java.text.MessageFormat;
import java.util.regex.Pattern;

/** User parameter. */
public class UserParam extends StringParam {
  /** Parameter name. */
  public static final String NAME = "user.name";
  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static Domain domain = new Domain(NAME,
      Pattern.compile(DFS_WEBHDFS_USER_PATTERN_DEFAULT));

  @VisibleForTesting
  public static Domain getUserPatternDomain() {
    return domain;
  }

  @VisibleForTesting
  public static void setUserPatternDomain(Domain dm) {
    domain = dm;
  }

  public static void setUserPattern(String pattern) {
    domain = new Domain(NAME, Pattern.compile(pattern));
  }

  private static String validateLength(String str) {
    if (str == null) {
      throw new IllegalArgumentException(
          MessageFormat.format("Parameter [{0}], cannot be NULL", NAME));
    }
    int len = str.length();
    if (len < 1) {
      throw new IllegalArgumentException(MessageFormat.format(
          "Parameter [{0}], its length must be at least 1", NAME));
    }
    return str;
  }

  /**
   * Constructor.
   * @param str a string representation of the parameter value.
   */
  public UserParam(final String str) {
    super(domain, str == null || str.equals(DEFAULT) ? null : validateLength(str));
  }

  /**
   * Construct an object from a UGI.
   */
  public UserParam(final UserGroupInformation ugi) {
    this(ugi.getShortUserName());
  }

  @Override
  public String getName() {
    return NAME;
  }
}
2,586
30.168675
101
java
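A small sketch of how UserParam is typically constructed; it assumes the default user-name pattern accepts a simple lowercase name, and the tighter pattern passed to setUserPattern is purely illustrative. Output comments are expectations derived from the code above, not a recorded run.

import org.apache.hadoop.hdfs.web.resources.UserParam;

public class UserParamSketch {
  public static void main(String[] args) {
    UserParam u = new UserParam("alice");
    System.out.println(u);                  // user.name=alice
    System.out.println(u.getValueString()); // alice

    // The accepted pattern can be replaced at runtime (illustrative pattern).
    UserParam.setUserPattern("^[a-z][a-z0-9]*$");
    System.out.println(new UserParam("hdfs"));
  }
}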
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import java.util.Arrays;
import java.util.EnumSet;
import java.util.Iterator;

import org.apache.hadoop.util.StringUtils;

abstract class EnumSetParam<E extends Enum<E>> extends Param<EnumSet<E>, EnumSetParam.Domain<E>> {
  /** Convert an EnumSet to a string of comma separated values. */
  static <E extends Enum<E>> String toString(EnumSet<E> set) {
    if (set == null || set.isEmpty()) {
      return "";
    } else {
      final StringBuilder b = new StringBuilder();
      final Iterator<E> i = set.iterator();
      b.append(i.next());
      for(; i.hasNext(); ) {
        b.append(',').append(i.next());
      }
      return b.toString();
    }
  }

  static <E extends Enum<E>> EnumSet<E> toEnumSet(final Class<E> clazz,
      final E[] values) {
    final EnumSet<E> set = EnumSet.noneOf(clazz);
    set.addAll(Arrays.asList(values));
    return set;
  }

  EnumSetParam(final Domain<E> domain, final EnumSet<E> value) {
    super(domain, value);
  }

  @Override
  public String toString() {
    return getName() + "=" + toString(value);
  }

  /** @return the parameter value as a string */
  @Override
  public String getValueString() {
    return toString(value);
  }

  /** The domain of the parameter. */
  static final class Domain<E extends Enum<E>> extends Param.Domain<EnumSet<E>> {
    private final Class<E> enumClass;

    Domain(String name, final Class<E> enumClass) {
      super(name);
      this.enumClass = enumClass;
    }

    @Override
    public final String getDomain() {
      return Arrays.asList(enumClass.getEnumConstants()).toString();
    }

    /** The string contains comma separated values. */
    @Override
    final EnumSet<E> parse(final String str) {
      final EnumSet<E> set = EnumSet.noneOf(enumClass);
      if (!str.isEmpty()) {
        for(int i, j = 0; j >= 0; ) {
          i = j > 0 ? j + 1 : 0;
          j = str.indexOf(',', i);
          final String sub = j >= 0 ? str.substring(i, j) : str.substring(i);
          set.add(Enum.valueOf(enumClass, StringUtils.toUpperCase(sub.trim())));
        }
      }
      return set;
    }
  }
}
2,948
30.709677
98
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RecursiveParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

/** Recursive parameter. */
public class RecursiveParam extends BooleanParam {
  /** Parameter name. */
  public static final String NAME = "recursive";
  /** Default parameter value. */
  public static final String DEFAULT = FALSE;

  private static final Domain DOMAIN = new Domain(NAME);

  /**
   * Constructor.
   * @param value the parameter value.
   */
  public RecursiveParam(final Boolean value) {
    super(DOMAIN, value);
  }

  /**
   * Constructor.
   * @param str a string representation of the parameter value.
   */
  public RecursiveParam(final String str) {
    this(DOMAIN.parse(str));
  }

  @Override
  public String getName() {
    return NAME;
  }
}
1,527
30.183673
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import org.apache.hadoop.security.UserGroupInformation;

/** Represents delegation token used for authentication. */
public class DelegationParam extends StringParam {
  /** Parameter name. */
  public static final String NAME = "delegation";
  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static final Domain DOMAIN = new Domain(NAME, null);

  /**
   * Constructor.
   * @param str a string representation of the parameter value.
   */
  public DelegationParam(final String str) {
    super(DOMAIN, UserGroupInformation.isSecurityEnabled()
        && str != null && !str.equals(DEFAULT) ? str : null);
  }

  @Override
  public String getName() {
    return NAME;
  }
}
1,564
34.568182
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

/** Integer parameter. */
abstract class IntegerParam extends Param<Integer, IntegerParam.Domain> {
  IntegerParam(final Domain domain, final Integer value, final Integer min,
      final Integer max) {
    super(domain, value);
    checkRange(min, max);
  }

  private void checkRange(final Integer min, final Integer max) {
    if (value == null) {
      return;
    }
    if (min != null && value < min) {
      throw new IllegalArgumentException("Invalid parameter range: " + getName()
          + " = " + domain.toString(value) + " < " + domain.toString(min));
    }
    if (max != null && value > max) {
      throw new IllegalArgumentException("Invalid parameter range: " + getName()
          + " = " + domain.toString(value) + " > " + domain.toString(max));
    }
  }

  @Override
  public String toString() {
    return getName() + "=" + domain.toString(getValue());
  }

  /** @return the parameter value as a string */
  @Override
  public String getValueString() {
    return domain.toString(getValue());
  }

  /** The domain of the parameter. */
  static final class Domain extends Param.Domain<Integer> {
    /** The radix of the number. */
    final int radix;

    Domain(final String paramName) {
      this(paramName, 10);
    }

    Domain(final String paramName, final int radix) {
      super(paramName);
      this.radix = radix;
    }

    @Override
    public String getDomain() {
      return "<" + NULL + " | int in radix " + radix + ">";
    }

    @Override
    Integer parse(final String str) {
      try {
        return NULL.equals(str) || str == null ? null : Integer.parseInt(str, radix);
      } catch(NumberFormatException e) {
        throw new IllegalArgumentException("Failed to parse \"" + str
            + "\" as a radix-" + radix + " integer.", e);
      }
    }

    /** Convert an Integer to a String. */
    String toString(final Integer n) {
      return n == null ? NULL : Integer.toString(n, radix);
    }
  }
}
2,823
30.730337
80
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

/** Short parameter. */
abstract class ShortParam extends Param<Short, ShortParam.Domain> {
  ShortParam(final Domain domain, final Short value, final Short min,
      final Short max) {
    super(domain, value);
    checkRange(min, max);
  }

  private void checkRange(final Short min, final Short max) {
    if (value == null) {
      return;
    }
    if (min != null && value < min) {
      throw new IllegalArgumentException("Invalid parameter range: " + getName()
          + " = " + domain.toString(value) + " < " + domain.toString(min));
    }
    if (max != null && value > max) {
      throw new IllegalArgumentException("Invalid parameter range: " + getName()
          + " = " + domain.toString(value) + " > " + domain.toString(max));
    }
  }

  @Override
  public String toString() {
    return getName() + "=" + domain.toString(getValue());
  }

  /** @return the parameter value as a string */
  @Override
  public final String getValueString() {
    return domain.toString(getValue());
  }

  /** The domain of the parameter. */
  static final class Domain extends Param.Domain<Short> {
    /** The radix of the number. */
    final int radix;

    Domain(final String paramName) {
      this(paramName, 10);
    }

    Domain(final String paramName, final int radix) {
      super(paramName);
      this.radix = radix;
    }

    @Override
    public String getDomain() {
      return "<" + NULL + " | short in radix " + radix + ">";
    }

    @Override
    Short parse(final String str) {
      try {
        return NULL.equals(str) || str == null ? null : Short.parseShort(str, radix);
      } catch(NumberFormatException e) {
        throw new IllegalArgumentException("Failed to parse \"" + str
            + "\" as a radix-" + radix + " short integer.", e);
      }
    }

    /** Convert a Short to a String. */
    String toString(final Short n) {
      return n == null ? NULL : Integer.toString(n, radix);
    }
  }
}
2,809
30.573034
80
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/SnapshotNameParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

/**
 * The snapshot name parameter for createSnapshot and deleteSnapshot operation.
 * Also used to indicate the new snapshot name for renameSnapshot operation.
 */
public class SnapshotNameParam extends StringParam {
  /** Parameter name. */
  public static final String NAME = "snapshotname";

  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static final Domain DOMAIN = new Domain(NAME, null);

  public SnapshotNameParam(final String str) {
    super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
  }

  @Override
  public String getName() {
    return NAME;
  }
}
1,476
34.166667
79
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

/** Length parameter. */
public class LengthParam extends LongParam {
  /** Parameter name. */
  public static final String NAME = "length";
  /** Default parameter value. */
  public static final String DEFAULT = NULL;

  private static final Domain DOMAIN = new Domain(NAME);

  /**
   * Constructor.
   * @param value the parameter value.
   */
  public LengthParam(final Long value) {
    super(DOMAIN, value, 0L, null);
  }

  /**
   * Constructor.
   * @param str a string representation of the parameter value.
   */
  public LengthParam(final String str) {
    this(DOMAIN.parse(str));
  }

  @Override
  public String getName() {
    return NAME;
  }

  public long getLength() {
    Long v = getValue();
    return v == null ? -1 : v;
  }
}
1,604
28.722222
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import org.apache.hadoop.fs.XAttrCodec;

public class XAttrEncodingParam extends EnumParam<XAttrCodec> {
  /** Parameter name. */
  public static final String NAME = "encoding";
  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static final Domain<XAttrCodec> DOMAIN =
      new Domain<XAttrCodec>(NAME, XAttrCodec.class);

  public XAttrEncodingParam(final XAttrCodec encoding) {
    super(DOMAIN, encoding);
  }

  /**
   * Constructor.
   * @param str a string representation of the parameter value.
   */
  public XAttrEncodingParam(final String str) {
    super(DOMAIN, str != null && !str.isEmpty() ? DOMAIN.parse(str) : null);
  }

  @Override
  public String getName() {
    return NAME;
  }

  @Override
  public String getValueString() {
    return value.toString();
  }

  public XAttrCodec getEncoding() {
    return getValue();
  }
}
1,739
29.526316
76
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import java.util.regex.Pattern;

/** String parameter. */
abstract class StringParam extends Param<String, StringParam.Domain> {
  StringParam(final Domain domain, String str) {
    super(domain, domain.parse(str));
  }

  /** @return the parameter value as a string */
  @Override
  public String getValueString() {
    return value;
  }

  /** The domain of the parameter. */
  static final class Domain extends Param.Domain<String> {
    /** The pattern defining the domain; null means any string is accepted. */
    private final Pattern pattern;

    Domain(final String paramName, final Pattern pattern) {
      super(paramName);
      this.pattern = pattern;
    }

    @Override
    public final String getDomain() {
      return pattern == null ? "<String>" : pattern.pattern();
    }

    @Override
    final String parse(final String str) {
      if (str != null && pattern != null) {
        if (!pattern.matcher(str).matches()) {
          throw new IllegalArgumentException("Invalid value: \"" + str
              + "\" does not belong to the domain " + getDomain());
        }
      }
      return str;
    }
  }
}
1,956
31.081967
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import org.apache.hadoop.fs.Options;

/** Rename option set parameter. */
public class RenameOptionSetParam extends EnumSetParam<Options.Rename> {
  /** Parameter name. */
  public static final String NAME = "renameoptions";
  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static final Domain<Options.Rename> DOMAIN = new Domain<Options.Rename>(
      NAME, Options.Rename.class);

  /**
   * Constructor.
   * @param options rename options.
   */
  public RenameOptionSetParam(final Options.Rename... options) {
    super(DOMAIN, toEnumSet(Options.Rename.class, options));
  }

  /**
   * Constructor.
   * @param str a string representation of the parameter value.
   */
  public RenameOptionSetParam(final String str) {
    super(DOMAIN, DOMAIN.parse(str));
  }

  @Override
  public String getName() {
    return NAME;
  }
}
1,722
32.134615
82
java
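A short sketch of the EnumSetParam behaviour through its RenameOptionSetParam subclass: enum sets serialize as comma-separated names and parse back case-insensitively. The output comments are expectations based on the code above, not captured output.

import org.apache.hadoop.fs.Options;
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;

public class RenameOptionsSketch {
  public static void main(String[] args) {
    RenameOptionSetParam p = new RenameOptionSetParam(Options.Rename.OVERWRITE);
    System.out.println(p);                 // renameoptions=OVERWRITE

    // Parsing splits on commas, trims, and upper-cases each token.
    RenameOptionSetParam parsed = new RenameOptionSetParam("overwrite,to_trash");
    System.out.println(parsed.getValue()); // [OVERWRITE, TO_TRASH]
  }
}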
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/Param.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.Arrays;
import java.util.Comparator;

/** Base class of parameters. */
public abstract class Param<T, D extends Param.Domain<T>> {
  static final String NULL = "null";

  static final Comparator<Param<?,?>> NAME_CMP = new Comparator<Param<?,?>>() {
    @Override
    public int compare(Param<?, ?> left, Param<?, ?> right) {
      return left.getName().compareTo(right.getName());
    }
  };

  /** Convert the parameters to a sorted String.
   *
   * @param separator URI parameter separator character
   * @param parameters parameters to encode into a string
   * @return the encoded URI string
   */
  public static String toSortedString(final String separator,
      final Param<?, ?>... parameters) {
    Arrays.sort(parameters, NAME_CMP);
    final StringBuilder b = new StringBuilder();
    try {
      for(Param<?, ?> p : parameters) {
        if (p.getValue() != null) {
          b.append(separator).append(
              URLEncoder.encode(p.getName(), "UTF-8")
              + "=" + URLEncoder.encode(p.getValueString(), "UTF-8"));
        }
      }
    } catch (UnsupportedEncodingException e) {
      // Sane systems know about UTF-8, so this should never happen.
      throw new RuntimeException(e);
    }
    return b.toString();
  }

  /** The domain of the parameter. */
  final D domain;
  /** The actual parameter value. */
  final T value;

  Param(final D domain, final T value) {
    this.domain = domain;
    this.value = value;
  }

  /** @return the parameter value. */
  public final T getValue() {
    return value;
  }

  /** @return the parameter value as a string */
  public abstract String getValueString();

  /** @return the parameter name. */
  public abstract String getName();

  @Override
  public String toString() {
    return getName() + "=" + value;
  }

  /** Base class of parameter domains. */
  static abstract class Domain<T> {
    /** Parameter name. */
    final String paramName;

    Domain(final String paramName) {
      this.paramName = paramName;
    }

    /** @return the parameter name. */
    public final String getParamName() {
      return paramName;
    }

    /** @return a string description of the domain of the parameter. */
    public abstract String getDomain();

    /** @return the parameter value represented by the string. */
    abstract T parse(String str);

    /** Parse the given string.
     * @return the parameter value represented by the string.
     */
    public final T parse(final String varName, final String str) {
      try {
        return str != null && str.trim().length() > 0 ? parse(str) : null;
      } catch(Exception e) {
        throw new IllegalArgumentException("Failed to parse \"" + str
            + "\" for the parameter " + varName
            + ". The value must be in the domain " + getDomain(), e);
      }
    }
  }
}
3,779
29.731707
79
java
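A sketch of how the Param hierarchy above is meant to be combined: toSortedString sorts parameters by name, skips null-valued ones, and URL-encodes each name/value pair. The expected query string in the comment is an assumption derived from that code, not a recorded run.

import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.LengthParam;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.hdfs.web.resources.UserParam;

public class QueryStringSketch {
  public static void main(String[] args) {
    String query = Param.toSortedString("&",
        new UserParam("alice"),
        new BlockSizeParam(134217728L),
        new LengthParam((Long) null)); // null-valued parameters are skipped
    // Expected: &blocksize=134217728&user.name=alice
    System.out.println(query);
  }
}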
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenArgumentParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

/**
 * Represents delegation token parameter as method arguments. This is
 * different from {@link DelegationParam}.
 */
public class TokenArgumentParam extends StringParam {
  /** Parameter name. */
  public static final String NAME = "token";
  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static final Domain DOMAIN = new Domain(NAME, null);

  /**
   * Constructor.
   * @param str A string representation of the parameter value.
   */
  public TokenArgumentParam(final String str) {
    super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
  }

  @Override
  public String getName() {
    return NAME;
  }
}
1,520
32.8
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

/** Group parameter. */
public class GroupParam extends StringParam {
  /** Parameter name. */
  public static final String NAME = "group";
  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static final Domain DOMAIN = new Domain(NAME, null);

  /**
   * Constructor.
   * @param str a string representation of the parameter value.
   */
  public GroupParam(final String str) {
    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
  }

  @Override
  public String getName() {
    return NAME;
  }
}
1,403
33.243902
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.web.resources;

/** Exclude datanodes param */
public class ExcludeDatanodesParam extends StringParam {
  /** Parameter name. */
  public static final String NAME = "excludedatanodes";
  /** Default parameter value. */
  public static final String DEFAULT = "";

  private static final Domain DOMAIN = new Domain(NAME, null);

  /**
   * Constructor.
   * @param str a string representation of the parameter value.
   */
  public ExcludeDatanodesParam(final String str) {
    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : DOMAIN.parse(str));
  }

  @Override
  public String getName() {
    return NAME;
  }
}
1,458
33.738095
79
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java
/* Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocol;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * This class defines a partial listing of a directory to support
 * iterative directory listing.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DirectoryListing {
  private HdfsFileStatus[] partialListing;
  private int remainingEntries;

  /**
   * constructor
   * @param partialListing a partial listing of a directory
   * @param remainingEntries number of entries that are left to be listed
   */
  public DirectoryListing(HdfsFileStatus[] partialListing,
      int remainingEntries) {
    if (partialListing == null) {
      throw new IllegalArgumentException("partial listing should not be null");
    }
    if (partialListing.length == 0 && remainingEntries != 0) {
      throw new IllegalArgumentException("Partial listing is empty but " +
          "the number of remaining entries is not zero");
    }
    this.partialListing = partialListing;
    this.remainingEntries = remainingEntries;
  }

  /**
   * Get the partial listing of file status
   * @return the partial listing of file status
   */
  public HdfsFileStatus[] getPartialListing() {
    return partialListing;
  }

  /**
   * Get the number of remaining entries that are left to be listed
   * @return the number of remaining entries that are left to be listed
   */
  public int getRemainingEntries() {
    return remainingEntries;
  }

  /**
   * Check if there are more entries that are left to be listed
   * @return true if there are more entries that are left to be listed;
   *         return false otherwise.
   */
  public boolean hasMore() {
    return remainingEntries != 0;
  }

  /**
   * Get the last name in this list
   * @return the last name in the list if it is not empty; otherwise return null
   */
  public byte[] getLastName() {
    if (partialListing.length == 0) {
      return null;
    }
    return partialListing[partialListing.length-1].getLocalNameInBytes();
  }
}
2,866
32.337209
80
java
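A hypothetical consumption pattern for DirectoryListing: fetch a batch, remember getLastName() as the resume cursor, and loop while hasMore(). The listBatch and process methods are illustrative stand-ins (for example, a call to a NameNode listing RPC) and are not part of the class above.

import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public abstract class ListingWalkerSketch {
  /** Hypothetical batch fetcher, e.g. backed by an RPC to the NameNode. */
  protected abstract DirectoryListing listBatch(byte[] startAfter) throws Exception;

  /** Hypothetical per-entry callback. */
  protected abstract void process(HdfsFileStatus status);

  public void walk() throws Exception {
    byte[] startAfter = new byte[0]; // empty name: start at the beginning
    DirectoryListing batch;
    do {
      batch = listBatch(startAfter);
      for (HdfsFileStatus status : batch.getPartialListing()) {
        process(status);
      }
      startAfter = batch.getLastName(); // resume after the last returned entry
    } while (batch.hasMore() && startAfter != null);
  }
}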