// File: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/compiler/generated/RccTokenManager.java (repo: hadoop)
/* Generated By:JavaCC: Do not edit this line. RccTokenManager.java */
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.record.compiler.generated;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
*/
@Deprecated
@InterfaceAudience.Public
@InterfaceStability.Stable
public class RccTokenManager implements RccConstants
{
public java.io.PrintStream debugStream = System.out;
public void setDebugStream(java.io.PrintStream ds) { debugStream = ds; }
private final int jjMoveStringLiteralDfa0_1()
{
return jjMoveNfa_1(0, 0);
}
private final void jjCheckNAdd(int state)
{
if (jjrounds[state] != jjround)
{
jjstateSet[jjnewStateCnt++] = state;
jjrounds[state] = jjround;
}
}
private final void jjAddStates(int start, int end)
{
do {
jjstateSet[jjnewStateCnt++] = jjnextStates[start];
} while (start++ != end);
}
private final void jjCheckNAddTwoStates(int state1, int state2)
{
jjCheckNAdd(state1);
jjCheckNAdd(state2);
}
private final void jjCheckNAddStates(int start, int end)
{
do {
jjCheckNAdd(jjnextStates[start]);
} while (start++ != end);
}
private final void jjCheckNAddStates(int start)
{
jjCheckNAdd(jjnextStates[start]);
jjCheckNAdd(jjnextStates[start + 1]);
}
private final int jjMoveNfa_1(int startState, int curPos)
{
int[] nextStates;
int startsAt = 0;
jjnewStateCnt = 3;
int i = 1;
jjstateSet[0] = startState;
int j, kind = 0x7fffffff;
for (;;)
{
if (++jjround == 0x7fffffff)
ReInitRounds();
if (curChar < 64)
{
long l = 1L << curChar;
MatchLoop: do
{
switch(jjstateSet[--i])
{
case 0:
if ((0x2400L & l) != 0L)
{
if (kind > 6)
kind = 6;
}
if (curChar == 13)
jjstateSet[jjnewStateCnt++] = 1;
break;
case 1:
if (curChar == 10 && kind > 6)
kind = 6;
break;
case 2:
if (curChar == 13)
jjstateSet[jjnewStateCnt++] = 1;
break;
default : break;
}
} while(i != startsAt);
}
else if (curChar < 128)
{
long l = 1L << (curChar & 077);
MatchLoop: do
{
switch(jjstateSet[--i])
{
default : break;
}
} while(i != startsAt);
}
else
{
int i2 = (curChar & 0xff) >> 6;
long l2 = 1L << (curChar & 077);
MatchLoop: do
{
switch(jjstateSet[--i])
{
default : break;
}
} while(i != startsAt);
}
if (kind != 0x7fffffff)
{
jjmatchedKind = kind;
jjmatchedPos = curPos;
kind = 0x7fffffff;
}
++curPos;
if ((i = jjnewStateCnt) == (startsAt = 3 - (jjnewStateCnt = startsAt)))
return curPos;
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) { return curPos; }
}
}
private final int jjStopStringLiteralDfa_0(int pos, long active0)
{
switch (pos)
{
case 0:
if ((active0 & 0xfff800L) != 0L)
{
jjmatchedKind = 32;
return 4;
}
return -1;
case 1:
if ((active0 & 0xfff800L) != 0L)
{
jjmatchedKind = 32;
jjmatchedPos = 1;
return 4;
}
return -1;
case 2:
if ((active0 & 0x7ef800L) != 0L)
{
jjmatchedKind = 32;
jjmatchedPos = 2;
return 4;
}
if ((active0 & 0x810000L) != 0L)
return 4;
return -1;
case 3:
if ((active0 & 0x24000L) != 0L)
return 4;
if ((active0 & 0x7cb800L) != 0L)
{
jjmatchedKind = 32;
jjmatchedPos = 3;
return 4;
}
return -1;
case 4:
if ((active0 & 0x41000L) != 0L)
return 4;
if ((active0 & 0x78a800L) != 0L)
{
jjmatchedKind = 32;
jjmatchedPos = 4;
return 4;
}
return -1;
case 5:
if ((active0 & 0x680800L) != 0L)
return 4;
if ((active0 & 0x10a000L) != 0L)
{
jjmatchedKind = 32;
jjmatchedPos = 5;
return 4;
}
return -1;
default :
return -1;
}
}
private final int jjStartNfa_0(int pos, long active0)
{
return jjMoveNfa_0(jjStopStringLiteralDfa_0(pos, active0), pos + 1);
}
private final int jjStopAtPos(int pos, int kind)
{
jjmatchedKind = kind;
jjmatchedPos = pos;
return pos + 1;
}
private final int jjStartNfaWithStates_0(int pos, int kind, int state)
{
jjmatchedKind = kind;
jjmatchedPos = pos;
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) { return pos + 1; }
return jjMoveNfa_0(state, pos + 1);
}
private final int jjMoveStringLiteralDfa0_0()
{
switch(curChar)
{
case 44:
return jjStopAtPos(0, 29);
case 46:
return jjStopAtPos(0, 30);
case 47:
return jjMoveStringLiteralDfa1_0(0x120L);
case 59:
return jjStopAtPos(0, 28);
case 60:
return jjStopAtPos(0, 26);
case 62:
return jjStopAtPos(0, 27);
case 98:
return jjMoveStringLiteralDfa1_0(0x20c000L);
case 99:
return jjMoveStringLiteralDfa1_0(0x1000L);
case 100:
return jjMoveStringLiteralDfa1_0(0x80000L);
case 102:
return jjMoveStringLiteralDfa1_0(0x40000L);
case 105:
return jjMoveStringLiteralDfa1_0(0x12000L);
case 108:
return jjMoveStringLiteralDfa1_0(0x20000L);
case 109:
return jjMoveStringLiteralDfa1_0(0x800800L);
case 117:
return jjMoveStringLiteralDfa1_0(0x100000L);
case 118:
return jjMoveStringLiteralDfa1_0(0x400000L);
case 123:
return jjStopAtPos(0, 24);
case 125:
return jjStopAtPos(0, 25);
default :
return jjMoveNfa_0(0, 0);
}
}
private final int jjMoveStringLiteralDfa1_0(long active0)
{
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) {
jjStopStringLiteralDfa_0(0, active0);
return 1;
}
switch(curChar)
{
case 42:
if ((active0 & 0x100L) != 0L)
return jjStopAtPos(1, 8);
break;
case 47:
if ((active0 & 0x20L) != 0L)
return jjStopAtPos(1, 5);
break;
case 97:
return jjMoveStringLiteralDfa2_0(active0, 0x800000L);
case 101:
return jjMoveStringLiteralDfa2_0(active0, 0x400000L);
case 108:
return jjMoveStringLiteralDfa2_0(active0, 0x41000L);
case 110:
return jjMoveStringLiteralDfa2_0(active0, 0x12000L);
case 111:
return jjMoveStringLiteralDfa2_0(active0, 0xa8800L);
case 115:
return jjMoveStringLiteralDfa2_0(active0, 0x100000L);
case 117:
return jjMoveStringLiteralDfa2_0(active0, 0x200000L);
case 121:
return jjMoveStringLiteralDfa2_0(active0, 0x4000L);
default :
break;
}
return jjStartNfa_0(0, active0);
}
private final int jjMoveStringLiteralDfa2_0(long old0, long active0)
{
if (((active0 &= old0)) == 0L)
return jjStartNfa_0(0, old0);
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) {
jjStopStringLiteralDfa_0(1, active0);
return 2;
}
switch(curChar)
{
case 97:
return jjMoveStringLiteralDfa3_0(active0, 0x1000L);
case 99:
return jjMoveStringLiteralDfa3_0(active0, 0x402000L);
case 100:
return jjMoveStringLiteralDfa3_0(active0, 0x800L);
case 102:
return jjMoveStringLiteralDfa3_0(active0, 0x200000L);
case 110:
return jjMoveStringLiteralDfa3_0(active0, 0x20000L);
case 111:
return jjMoveStringLiteralDfa3_0(active0, 0x48000L);
case 112:
if ((active0 & 0x800000L) != 0L)
return jjStartNfaWithStates_0(2, 23, 4);
break;
case 116:
if ((active0 & 0x10000L) != 0L)
return jjStartNfaWithStates_0(2, 16, 4);
return jjMoveStringLiteralDfa3_0(active0, 0x104000L);
case 117:
return jjMoveStringLiteralDfa3_0(active0, 0x80000L);
default :
break;
}
return jjStartNfa_0(1, active0);
}
private final int jjMoveStringLiteralDfa3_0(long old0, long active0)
{
if (((active0 &= old0)) == 0L)
return jjStartNfa_0(1, old0);
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) {
jjStopStringLiteralDfa_0(2, active0);
return 3;
}
switch(curChar)
{
case 97:
return jjMoveStringLiteralDfa4_0(active0, 0x40000L);
case 98:
return jjMoveStringLiteralDfa4_0(active0, 0x80000L);
case 101:
if ((active0 & 0x4000L) != 0L)
return jjStartNfaWithStates_0(3, 14, 4);
break;
case 102:
return jjMoveStringLiteralDfa4_0(active0, 0x200000L);
case 103:
if ((active0 & 0x20000L) != 0L)
return jjStartNfaWithStates_0(3, 17, 4);
break;
case 108:
return jjMoveStringLiteralDfa4_0(active0, 0xa000L);
case 114:
return jjMoveStringLiteralDfa4_0(active0, 0x100000L);
case 115:
return jjMoveStringLiteralDfa4_0(active0, 0x1000L);
case 116:
return jjMoveStringLiteralDfa4_0(active0, 0x400000L);
case 117:
return jjMoveStringLiteralDfa4_0(active0, 0x800L);
default :
break;
}
return jjStartNfa_0(2, active0);
}
private final int jjMoveStringLiteralDfa4_0(long old0, long active0)
{
if (((active0 &= old0)) == 0L)
return jjStartNfa_0(2, old0);
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) {
jjStopStringLiteralDfa_0(3, active0);
return 4;
}
switch(curChar)
{
case 101:
return jjMoveStringLiteralDfa5_0(active0, 0x208000L);
case 105:
return jjMoveStringLiteralDfa5_0(active0, 0x100000L);
case 108:
return jjMoveStringLiteralDfa5_0(active0, 0x80800L);
case 111:
return jjMoveStringLiteralDfa5_0(active0, 0x400000L);
case 115:
if ((active0 & 0x1000L) != 0L)
return jjStartNfaWithStates_0(4, 12, 4);
break;
case 116:
if ((active0 & 0x40000L) != 0L)
return jjStartNfaWithStates_0(4, 18, 4);
break;
case 117:
return jjMoveStringLiteralDfa5_0(active0, 0x2000L);
default :
break;
}
return jjStartNfa_0(3, active0);
}
private final int jjMoveStringLiteralDfa5_0(long old0, long active0)
{
if (((active0 &= old0)) == 0L)
return jjStartNfa_0(3, old0);
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) {
jjStopStringLiteralDfa_0(4, active0);
return 5;
}
switch(curChar)
{
case 97:
return jjMoveStringLiteralDfa6_0(active0, 0x8000L);
case 100:
return jjMoveStringLiteralDfa6_0(active0, 0x2000L);
case 101:
if ((active0 & 0x800L) != 0L)
return jjStartNfaWithStates_0(5, 11, 4);
else if ((active0 & 0x80000L) != 0L)
return jjStartNfaWithStates_0(5, 19, 4);
break;
case 110:
return jjMoveStringLiteralDfa6_0(active0, 0x100000L);
case 114:
if ((active0 & 0x200000L) != 0L)
return jjStartNfaWithStates_0(5, 21, 4);
else if ((active0 & 0x400000L) != 0L)
return jjStartNfaWithStates_0(5, 22, 4);
break;
default :
break;
}
return jjStartNfa_0(4, active0);
}
private final int jjMoveStringLiteralDfa6_0(long old0, long active0)
{
if (((active0 &= old0)) == 0L)
return jjStartNfa_0(4, old0);
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) {
jjStopStringLiteralDfa_0(5, active0);
return 6;
}
switch(curChar)
{
case 101:
if ((active0 & 0x2000L) != 0L)
return jjStartNfaWithStates_0(6, 13, 4);
break;
case 103:
if ((active0 & 0x100000L) != 0L)
return jjStartNfaWithStates_0(6, 20, 4);
break;
case 110:
if ((active0 & 0x8000L) != 0L)
return jjStartNfaWithStates_0(6, 15, 4);
break;
default :
break;
}
return jjStartNfa_0(5, active0);
}
static final long[] jjbitVec0 = {
0x0L, 0x0L, 0xffffffffffffffffL, 0xffffffffffffffffL
};
private final int jjMoveNfa_0(int startState, int curPos)
{
int[] nextStates;
int startsAt = 0;
jjnewStateCnt = 5;
int i = 1;
jjstateSet[0] = startState;
int j, kind = 0x7fffffff;
for (;;)
{
if (++jjround == 0x7fffffff)
ReInitRounds();
if (curChar < 64)
{
long l = 1L << curChar;
MatchLoop: do
{
switch(jjstateSet[--i])
{
case 0:
if (curChar == 34)
jjCheckNAdd(1);
break;
case 1:
if ((0xfffffffbffffffffL & l) != 0L)
jjCheckNAddTwoStates(1, 2);
break;
case 2:
if (curChar == 34 && kind > 31)
kind = 31;
break;
case 4:
if ((0x3ff000000000000L & l) == 0L)
break;
if (kind > 32)
kind = 32;
jjstateSet[jjnewStateCnt++] = 4;
break;
default : break;
}
} while(i != startsAt);
}
else if (curChar < 128)
{
long l = 1L << (curChar & 077);
MatchLoop: do
{
switch(jjstateSet[--i])
{
case 0:
if ((0x7fffffe07fffffeL & l) == 0L)
break;
if (kind > 32)
kind = 32;
jjCheckNAdd(4);
break;
case 1:
jjAddStates(0, 1);
break;
case 4:
if ((0x7fffffe87fffffeL & l) == 0L)
break;
if (kind > 32)
kind = 32;
jjCheckNAdd(4);
break;
default : break;
}
} while(i != startsAt);
}
else
{
int i2 = (curChar & 0xff) >> 6;
long l2 = 1L << (curChar & 077);
MatchLoop: do
{
switch(jjstateSet[--i])
{
case 1:
if ((jjbitVec0[i2] & l2) != 0L)
jjAddStates(0, 1);
break;
default : break;
}
} while(i != startsAt);
}
if (kind != 0x7fffffff)
{
jjmatchedKind = kind;
jjmatchedPos = curPos;
kind = 0x7fffffff;
}
++curPos;
if ((i = jjnewStateCnt) == (startsAt = 5 - (jjnewStateCnt = startsAt)))
return curPos;
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) { return curPos; }
}
}
private final int jjMoveStringLiteralDfa0_2()
{
switch(curChar)
{
case 42:
return jjMoveStringLiteralDfa1_2(0x200L);
default :
return 1;
}
}
private final int jjMoveStringLiteralDfa1_2(long active0)
{
try { curChar = input_stream.readChar(); }
catch(java.io.IOException e) {
return 1;
}
switch(curChar)
{
case 47:
if ((active0 & 0x200L) != 0L)
return jjStopAtPos(1, 9);
break;
default :
return 2;
}
return 2;
}
static final int[] jjnextStates = {
1, 2,
};
public static final String[] jjstrLiteralImages = {
"", null, null, null, null, null, null, null, null, null, null,
"\155\157\144\165\154\145", "\143\154\141\163\163", "\151\156\143\154\165\144\145", "\142\171\164\145",
"\142\157\157\154\145\141\156", "\151\156\164", "\154\157\156\147", "\146\154\157\141\164",
"\144\157\165\142\154\145", "\165\163\164\162\151\156\147", "\142\165\146\146\145\162",
"\166\145\143\164\157\162", "\155\141\160", "\173", "\175", "\74", "\76", "\73", "\54", "\56", null, null, };
public static final String[] lexStateNames = {
"DEFAULT",
"WithinOneLineComment",
"WithinMultiLineComment",
};
public static final int[] jjnewLexState = {
-1, -1, -1, -1, -1, 1, 0, -1, 2, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
};
static final long[] jjtoToken = {
0x1fffff801L,
};
static final long[] jjtoSkip = {
0x37eL,
};
static final long[] jjtoSpecial = {
0x360L,
};
static final long[] jjtoMore = {
0x480L,
};
protected SimpleCharStream input_stream;
private final int[] jjrounds = new int[5];
private final int[] jjstateSet = new int[10];
StringBuffer image;
int jjimageLen;
int lengthOfMatch;
protected char curChar;
public RccTokenManager(SimpleCharStream stream){
if (SimpleCharStream.staticFlag)
throw new Error("ERROR: Cannot use a static CharStream class with a non-static lexical analyzer.");
input_stream = stream;
}
public RccTokenManager(SimpleCharStream stream, int lexState){
this(stream);
SwitchTo(lexState);
}
public void ReInit(SimpleCharStream stream)
{
jjmatchedPos = jjnewStateCnt = 0;
curLexState = defaultLexState;
input_stream = stream;
ReInitRounds();
}
private final void ReInitRounds()
{
int i;
jjround = 0x80000001;
for (i = 5; i-- > 0;)
jjrounds[i] = 0x80000000;
}
public void ReInit(SimpleCharStream stream, int lexState)
{
ReInit(stream);
SwitchTo(lexState);
}
public void SwitchTo(int lexState)
{
if (lexState >= 3 || lexState < 0)
throw new TokenMgrError("Error: Ignoring invalid lexical state : " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE);
else
curLexState = lexState;
}
protected Token jjFillToken()
{
Token t = Token.newToken(jjmatchedKind);
t.kind = jjmatchedKind;
String im = jjstrLiteralImages[jjmatchedKind];
t.image = (im == null) ? input_stream.GetImage() : im;
t.beginLine = input_stream.getBeginLine();
t.beginColumn = input_stream.getBeginColumn();
t.endLine = input_stream.getEndLine();
t.endColumn = input_stream.getEndColumn();
return t;
}
int curLexState = 0;
int defaultLexState = 0;
int jjnewStateCnt;
int jjround;
int jjmatchedPos;
int jjmatchedKind;
public Token getNextToken()
{
int kind;
Token specialToken = null;
Token matchedToken;
int curPos = 0;
EOFLoop :
for (;;)
{
try
{
curChar = input_stream.BeginToken();
}
catch(java.io.IOException e)
{
jjmatchedKind = 0;
matchedToken = jjFillToken();
matchedToken.specialToken = specialToken;
return matchedToken;
}
image = null;
jjimageLen = 0;
for (;;)
{
switch(curLexState)
{
case 0:
try { input_stream.backup(0);
while (curChar <= 32 && (0x100002600L & (1L << curChar)) != 0L)
curChar = input_stream.BeginToken();
}
catch (java.io.IOException e1) { continue EOFLoop; }
jjmatchedKind = 0x7fffffff;
jjmatchedPos = 0;
curPos = jjMoveStringLiteralDfa0_0();
break;
case 1:
jjmatchedKind = 0x7fffffff;
jjmatchedPos = 0;
curPos = jjMoveStringLiteralDfa0_1();
if (jjmatchedPos == 0 && jjmatchedKind > 7)
{
jjmatchedKind = 7;
}
break;
case 2:
jjmatchedKind = 0x7fffffff;
jjmatchedPos = 0;
curPos = jjMoveStringLiteralDfa0_2();
if (jjmatchedPos == 0 && jjmatchedKind > 10)
{
jjmatchedKind = 10;
}
break;
}
if (jjmatchedKind != 0x7fffffff)
{
if (jjmatchedPos + 1 < curPos)
input_stream.backup(curPos - jjmatchedPos - 1);
if ((jjtoToken[jjmatchedKind >> 6] & (1L << (jjmatchedKind & 077))) != 0L)
{
matchedToken = jjFillToken();
matchedToken.specialToken = specialToken;
if (jjnewLexState[jjmatchedKind] != -1)
curLexState = jjnewLexState[jjmatchedKind];
return matchedToken;
}
else if ((jjtoSkip[jjmatchedKind >> 6] & (1L << (jjmatchedKind & 077))) != 0L)
{
if ((jjtoSpecial[jjmatchedKind >> 6] & (1L << (jjmatchedKind & 077))) != 0L)
{
matchedToken = jjFillToken();
if (specialToken == null)
specialToken = matchedToken;
else
{
matchedToken.specialToken = specialToken;
specialToken = (specialToken.next = matchedToken);
}
SkipLexicalActions(matchedToken);
}
else
SkipLexicalActions(null);
if (jjnewLexState[jjmatchedKind] != -1)
curLexState = jjnewLexState[jjmatchedKind];
continue EOFLoop;
}
jjimageLen += jjmatchedPos + 1;
if (jjnewLexState[jjmatchedKind] != -1)
curLexState = jjnewLexState[jjmatchedKind];
curPos = 0;
jjmatchedKind = 0x7fffffff;
try {
curChar = input_stream.readChar();
continue;
}
catch (java.io.IOException e1) { }
}
int error_line = input_stream.getEndLine();
int error_column = input_stream.getEndColumn();
String error_after = null;
boolean EOFSeen = false;
try { input_stream.readChar(); input_stream.backup(1); }
catch (java.io.IOException e1) {
EOFSeen = true;
error_after = curPos <= 1 ? "" : input_stream.GetImage();
if (curChar == '\n' || curChar == '\r') {
error_line++;
error_column = 0;
}
else
error_column++;
}
if (!EOFSeen) {
input_stream.backup(1);
error_after = curPos <= 1 ? "" : input_stream.GetImage();
}
throw new TokenMgrError(EOFSeen, curLexState, error_line, error_column, error_after, curChar, TokenMgrError.LEXICAL_ERROR);
}
}
}
void SkipLexicalActions(Token matchedToken)
{
switch(jjmatchedKind)
{
default :
break;
}
}
}
// File: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.net.URLStreamHandlerFactory;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* Factory for URL stream handlers.
*
* There is only one handler, whose job is to create URL connections. A
* FsUrlConnection relies on FileSystem to choose the appropriate FS
* implementation.
*
* Before returning our handler, we make sure that FileSystem knows an
* implementation for the requested scheme/protocol.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class FsUrlStreamHandlerFactory implements
URLStreamHandlerFactory {
// The configuration holds supported FS implementation class names.
private Configuration conf;
// This map stores whether a protocol is known to FileSystem or not
private Map<String, Boolean> protocols =
new ConcurrentHashMap<String, Boolean>();
// The URL Stream handler
private java.net.URLStreamHandler handler;
public FsUrlStreamHandlerFactory() {
this(new Configuration());
}
public FsUrlStreamHandlerFactory(Configuration conf) {
this.conf = new Configuration(conf);
// force init of FileSystem code to avoid HADOOP-9041
try {
FileSystem.getFileSystemClass("file", conf);
} catch (IOException io) {
throw new RuntimeException(io);
}
this.handler = new FsUrlStreamHandler(this.conf);
}
@Override
public java.net.URLStreamHandler createURLStreamHandler(String protocol) {
if (!protocols.containsKey(protocol)) {
boolean known = true;
try {
FileSystem.getFileSystemClass(protocol, conf);
}
catch (IOException ex) {
known = false;
}
protocols.put(protocol, known);
}
if (protocols.get(protocol)) {
return handler;
} else {
// FileSystem does not know the protocol, let the VM handle this
return null;
}
}
}
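// --- Usage sketch (illustrative; not part of the file above) ---
// A minimal example of wiring the factory into java.net.URL so that hdfs://
// URLs resolve through FileSystem. Assumptions: the cluster address and path
// are placeholders, and no other URLStreamHandlerFactory has been installed
// (the JVM allows setting it only once per process).
import java.io.InputStream;
import java.net.URL;
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;

class FsUrlExample {
  public static void main(String[] args) throws Exception {
    // Install the Hadoop-aware handler factory process-wide.
    URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
    // "hdfs" is now a known scheme; unknown schemes still fall back to the JVM.
    try (InputStream in =
        new URL("hdfs://namenode:8020/tmp/data.txt").openStream()) {
      System.out.println("first byte: " + in.read());
    }
  }
}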
// File: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.StringReader;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import com.google.common.annotations.VisibleForTesting;
import static java.nio.file.Files.createLink;
/**
* Class for creating hardlinks.
* Supports Unix/Linux, Windows via winutils, and Mac OS X.
*
* The HardLink class was formerly a static inner class of FSUtil,
* and the methods provided were blatantly non-thread-safe.
* To enable volume-parallel Update snapshots, we now provide static
* threadsafe methods that allocate new buffer string arrays
* upon each call. We also provide an API to hardlink all files in a
* directory with a single command, which is up to 128 times more
* efficient, and minimizes the impact of the extra buffer creations.
*/
public class HardLink {
private static HardLinkCommandGetter getHardLinkCommand;
public final LinkStats linkStats; //not static
//initialize the command "getters" statically, so we can use their
//methods without instantiating the HardLink object
static {
if (Shell.WINDOWS) {
// Windows
getHardLinkCommand = new HardLinkCGWin();
} else {
// Unix or Linux
getHardLinkCommand = new HardLinkCGUnix();
//override getLinkCountCommand for the particular Unix variant
//Linux is already set as the default - {"stat","-c%h", null}
if (Shell.MAC || Shell.FREEBSD) {
String[] linkCountCmdTemplate = {"/usr/bin/stat","-f%l", null};
HardLinkCGUnix.setLinkCountCmdTemplate(linkCountCmdTemplate);
} else if (Shell.SOLARIS) {
String[] linkCountCmdTemplate = {"ls","-l", null};
HardLinkCGUnix.setLinkCountCmdTemplate(linkCountCmdTemplate);
}
}
}
public HardLink() {
linkStats = new LinkStats();
}
/**
* This abstract class bridges the OS-dependent implementations of the
* needed functionality for querying link counts.
* The particular implementation class is chosen during
* static initialization phase of the HardLink class.
* The "getter" methods construct shell command strings.
*/
private static abstract class HardLinkCommandGetter {
/**
* Get the command string to query the hardlink count of a file
*/
abstract String[] linkCount(File file) throws IOException;
}
/**
* Implementation of HardLinkCommandGetter class for Unix
*/
private static class HardLinkCGUnix extends HardLinkCommandGetter {
private static String[] getLinkCountCommand = {"stat","-c%h", null};
private static synchronized
void setLinkCountCmdTemplate(String[] template) {
//May update this for specific unix variants,
//after static initialization phase
getLinkCountCommand = template;
}
/*
* @see org.apache.hadoop.fs.HardLink.HardLinkCommandGetter#linkCount(java.io.File)
*/
@Override
String[] linkCount(File file)
throws IOException {
String[] buf = new String[getLinkCountCommand.length];
System.arraycopy(getLinkCountCommand, 0, buf, 0,
getLinkCountCommand.length);
buf[getLinkCountCommand.length - 1] = FileUtil.makeShellPath(file, true);
return buf;
}
}
/**
* Implementation of HardLinkCommandGetter class for Windows
*/
@VisibleForTesting
static class HardLinkCGWin extends HardLinkCommandGetter {
static String[] getLinkCountCommand = {
Shell.WINUTILS, "hardlink", "stat", null};
/*
* @see org.apache.hadoop.fs.HardLink.HardLinkCommandGetter#linkCount(java.io.File)
*/
@Override
String[] linkCount(File file) throws IOException {
String[] buf = new String[getLinkCountCommand.length];
System.arraycopy(getLinkCountCommand, 0, buf, 0,
getLinkCountCommand.length);
buf[getLinkCountCommand.length - 1] = file.getCanonicalPath();
return buf;
}
}
/*
* ****************************************************
* Complexity is above. User-visible functionality is below
* ****************************************************
*/
/**
* Creates a hardlink
* @param file - existing source file
* @param linkName - desired target link file
*/
public static void createHardLink(File file, File linkName)
throws IOException {
if (file == null) {
throw new IOException(
"invalid arguments to createHardLink: source file is null");
}
if (linkName == null) {
throw new IOException(
"invalid arguments to createHardLink: link name is null");
}
createLink(linkName.toPath(), file.toPath());
}
/**
* Creates hardlinks from multiple existing files within one parent
* directory, into one target directory.
* @param parentDir - directory containing source files
* @param fileBaseNames - list of path-less file names, as returned by
* parentDir.list()
* @param linkDir - where the hardlinks should be put. It must already exist.
*/
public static void createHardLinkMult(File parentDir, String[] fileBaseNames,
File linkDir) throws IOException {
if (parentDir == null) {
throw new IOException(
"invalid arguments to createHardLinkMult: parent directory is null");
}
if (linkDir == null) {
throw new IOException(
"invalid arguments to createHardLinkMult: link directory is null");
}
if (fileBaseNames == null) {
throw new IOException(
"invalid arguments to createHardLinkMult: "
+ "filename list can be empty but not null");
}
if (!linkDir.exists()) {
throw new FileNotFoundException(linkDir + " not found.");
}
for (String name : fileBaseNames) {
createLink(linkDir.toPath().resolve(name),
parentDir.toPath().resolve(name));
}
}
/**
* Retrieves the number of links to the specified file.
*/
public static int getLinkCount(File fileName) throws IOException {
if (fileName == null) {
throw new IOException(
"invalid argument to getLinkCount: file name is null");
}
if (!fileName.exists()) {
throw new FileNotFoundException(fileName + " not found.");
}
// construct and execute shell command
String[] cmd = getHardLinkCommand.linkCount(fileName);
String inpMsg = null;
String errMsg = null;
int exitValue = -1;
BufferedReader in = null;
ShellCommandExecutor shexec = new ShellCommandExecutor(cmd);
try {
shexec.execute();
in = new BufferedReader(new StringReader(shexec.getOutput()));
inpMsg = in.readLine();
exitValue = shexec.getExitCode();
if (inpMsg == null || exitValue != 0) {
throw createIOException(fileName, inpMsg, errMsg, exitValue, null);
}
if (Shell.SOLARIS) {
String[] result = inpMsg.split("\\s+");
return Integer.parseInt(result[1]);
} else {
return Integer.parseInt(inpMsg);
}
} catch (ExitCodeException e) {
inpMsg = shexec.getOutput();
errMsg = e.getMessage();
exitValue = e.getExitCode();
throw createIOException(fileName, inpMsg, errMsg, exitValue, e);
} catch (NumberFormatException e) {
throw createIOException(fileName, inpMsg, errMsg, exitValue, e);
} finally {
IOUtils.closeStream(in);
}
}
/* Create an IOException for failing to get link count. */
private static IOException createIOException(File f, String message,
String error, int exitvalue, Exception cause) {
final String s = "Failed to get link count on file " + f
+ ": message=" + message
+ "; error=" + error
+ "; exit value=" + exitvalue;
return (cause == null) ? new IOException(s) : new IOException(s, cause);
}
/**
* HardLink statistics counters and methods.
* Not multi-thread safe, obviously.
* Init is called during HardLink instantiation, above.
*
* These are intended for use by knowledgeable clients, not internally,
* because many of the internal methods are static and can't update these
* per-instance counters.
*/
public static class LinkStats {
public int countDirs = 0;
public int countSingleLinks = 0;
public int countMultLinks = 0;
public int countFilesMultLinks = 0;
public int countEmptyDirs = 0;
public int countPhysicalFileCopies = 0;
public void clear() {
countDirs = 0;
countSingleLinks = 0;
countMultLinks = 0;
countFilesMultLinks = 0;
countEmptyDirs = 0;
countPhysicalFileCopies = 0;
}
public String report() {
return "HardLinkStats: " + countDirs + " Directories, including "
+ countEmptyDirs + " Empty Directories, "
+ countSingleLinks
+ " single Link operations, " + countMultLinks
+ " multi-Link operations, linking " + countFilesMultLinks
+ " files, total " + (countSingleLinks + countFilesMultLinks)
+ " linkable files. Also physically copied "
+ countPhysicalFileCopies + " other files.";
}
}
}
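// --- Usage sketch (illustrative; not part of the file above) ---
// Creating a hardlink and querying the resulting link count; the paths are
// placeholders. getLinkCount shells out to the platform-specific command
// selected in the static initializer above.
import java.io.File;
import org.apache.hadoop.fs.HardLink;

class HardLinkExample {
  public static void main(String[] args) throws Exception {
    File src = new File("/tmp/source.dat");   // must already exist
    File link = new File("/tmp/source-link.dat");
    HardLink.createHardLink(src, link);
    // Both names now refer to the same inode, so the count should be >= 2.
    System.out.println("link count: " + HardLink.getLinkCount(src));
  }
}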
// File: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CanSetDropBehind.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface CanSetDropBehind {
/**
* Configure whether the stream should drop the cache.
*
* @param dropCache Whether to drop the cache. null means to use the
* default value.
* @throws IOException If there was an error changing the dropBehind
* setting.
* @throws UnsupportedOperationException If this stream doesn't support
* setting the drop-behind.
*/
public void setDropBehind(Boolean dropCache)
throws IOException, UnsupportedOperationException;
}
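// --- Usage sketch (illustrative; not part of the file above) ---
// FSDataInputStream implements this interface in recent Hadoop releases, so a
// one-pass scan can hint that its pages should not linger in the OS cache.
// The path is a placeholder; streams that cannot honor the hint throw
// UnsupportedOperationException.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class DropBehindExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataInputStream in = fs.open(new Path("/tmp/big-scan.dat"))) {
      in.setDropBehind(true); // drop cached pages behind the read position
      byte[] buf = new byte[8192];
      while (in.read(buf) > 0) {
        // stream through the file exactly once
      }
    }
  }
}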
// File: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSLinkResolver.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Used primarily by {@link FileContext} to operate on and resolve
* symlinks in a path. Operations can potentially span multiple
* {@link AbstractFileSystem}s.
*
* @see FileSystemLinkResolver
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class FSLinkResolver<T> {
/**
* Return a fully-qualified version of the given symlink target if it
* has no scheme and authority. Partially and fully-qualified paths
* are returned unmodified.
* @param pathURI URI of the filesystem of pathWithLink
* @param pathWithLink Path that contains the symlink
* @param target The symlink's absolute target
* @return Fully qualified version of the target.
*/
public static Path qualifySymlinkTarget(final URI pathURI,
Path pathWithLink, Path target) {
// NB: makeQualified uses the target's scheme and authority, if
// specified, and the scheme and authority of pathURI, if not.
final URI targetUri = target.toUri();
final String scheme = targetUri.getScheme();
final String auth = targetUri.getAuthority();
return (scheme == null && auth == null) ? target.makeQualified(pathURI,
pathWithLink.getParent()) : target;
}
/**
* Generic helper function overridden on instantiation to perform a
* specific operation on the given file system using the given path
* which may result in an UnresolvedLinkException.
* @param fs AbstractFileSystem to perform the operation on.
* @param p Path given the file system.
* @return Generic type determined by the specific implementation.
* @throws UnresolvedLinkException If symbolic link <code>path</code> could
* not be resolved
* @throws IOException an I/O error occurred
*/
abstract public T next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException;
/**
* Performs the operation specified by the next function, calling it
* repeatedly until all symlinks in the given path are resolved.
* @param fc FileContext used to access file systems.
* @param path The path to resolve symlinks on.
* @return Generic type determined by the implementation of next.
* @throws IOException if an I/O error occurs, symlink resolution is
* disabled, or the maximum number of symbolic links is exceeded
*/
public T resolve(final FileContext fc, final Path path) throws IOException {
int count = 0;
T in = null;
Path p = path;
// NB: More than one AbstractFileSystem can match a scheme, e.g.
// "file" resolves to LocalFs but could have come via RawLocalFs.
AbstractFileSystem fs = fc.getFSofPath(p);
// Loop until all symlinks are resolved or the limit is reached
for (boolean isLink = true; isLink;) {
try {
in = next(fs, p);
isLink = false;
} catch (UnresolvedLinkException e) {
if (!fc.resolveSymlinks) {
throw new IOException("Path " + path + " contains a symlink"
+ " and symlink resolution is disabled ("
+ CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY + ").", e);
}
if (!FileSystem.areSymlinksEnabled()) {
throw new IOException("Symlink resolution is disabled in"
+ " this version of Hadoop.");
}
if (count++ > FsConstants.MAX_PATH_LINKS) {
throw new IOException("Possible cyclic loop while " +
"following symbolic link " + path);
}
// Resolve the first unresolved path component
p = qualifySymlinkTarget(fs.getUri(), p, fs.getLinkTarget(p));
fs = fc.getFSofPath(p);
}
}
return in;
}
}
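// --- Usage sketch (illustrative; not part of the file above) ---
// The intended pattern: subclass (often anonymously), put the single-attempt
// operation in next(), and let resolve() retry across symlink hops. The path
// is a placeholder.
import java.io.IOException;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.FSLinkResolver;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

class ResolveExample {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext();
    FileStatus status = new FSLinkResolver<FileStatus>() {
      @Override
      public FileStatus next(AbstractFileSystem fs, Path p) throws IOException {
        // One attempt; may throw UnresolvedLinkException, triggering another hop.
        return fs.getFileStatus(p);
      }
    }.resolve(fc, new Path("/maybe/a/symlink"));
    System.out.println(status.getPath());
  }
}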
// File: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystemConfigKeys.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This class contains constants for configuration keys used
* in the local file system, raw local fs and checksum fs.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class LocalFileSystemConfigKeys extends CommonConfigurationKeys {
public static final String LOCAL_FS_BLOCK_SIZE_KEY = "file.blocksize";
public static final long LOCAL_FS_BLOCK_SIZE_DEFAULT = 64*1024*1024;
public static final String LOCAL_FS_REPLICATION_KEY = "file.replication";
public static final short LOCAL_FS_REPLICATION_DEFAULT = 1;
public static final String LOCAL_FS_STREAM_BUFFER_SIZE_KEY =
"file.stream-buffer-size";
public static final int LOCAL_FS_STREAM_BUFFER_SIZE_DEFAULT = 4096;
public static final String LOCAL_FS_BYTES_PER_CHECKSUM_KEY =
"file.bytes-per-checksum";
public static final int LOCAL_FS_BYTES_PER_CHECKSUM_DEFAULT = 512;
public static final String LOCAL_FS_CLIENT_WRITE_PACKET_SIZE_KEY =
"file.client-write-packet-size";
public static final int LOCAL_FS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
}
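// --- Usage sketch (illustrative; not part of the file above) ---
// The constants are ordinary Configuration keys, read with their paired
// defaults like any other Hadoop setting.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalFileSystemConfigKeys;

class LocalFsKeysExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    long blockSize = conf.getLong(
        LocalFileSystemConfigKeys.LOCAL_FS_BLOCK_SIZE_KEY,
        LocalFileSystemConfigKeys.LOCAL_FS_BLOCK_SIZE_DEFAULT);
    System.out.println("local fs block size: " + blockSize); // 64 MB by default
  }
}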
// File: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.DataChecksum;
import org.apache.htrace.NullScope;
import org.apache.htrace.TraceScope;
import java.io.IOException;
import java.io.OutputStream;
import java.util.zip.Checksum;
/**
* This is a generic output stream for generating checksums for
* data before it is written to the underlying stream
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Unstable
abstract public class FSOutputSummer extends OutputStream {
// data checksum
private final DataChecksum sum;
// internal buffer for storing data before it is checksummed
private byte buf[];
// internal buffer for storing checksum
private byte checksum[];
// The number of valid bytes in the buffer.
private int count;
// We want this value to be a multiple of 3 because the native code checksums
// 3 chunks simultaneously. The chosen value of 9 strikes a balance between
// limiting the number of JNI calls and flushing to the underlying stream
// relatively frequently.
private static final int BUFFER_NUM_CHUNKS = 9;
protected FSOutputSummer(DataChecksum sum) {
this.sum = sum;
this.buf = new byte[sum.getBytesPerChecksum() * BUFFER_NUM_CHUNKS];
this.checksum = new byte[getChecksumSize() * BUFFER_NUM_CHUNKS];
this.count = 0;
}
/* Write the data chunk in <code>b</code> starting at <code>bOffset</code> with
* a length of <code>bLen > 0</code>, along with its checksum.
*/
protected abstract void writeChunk(byte[] b, int bOffset, int bLen,
byte[] checksum, int checksumOffset, int checksumLen) throws IOException;
/**
* Check if the implementing OutputStream is closed and should no longer
* accept writes. Implementations should do nothing if this stream is not
* closed, and should throw an {@link IOException} if it is closed.
*
* @throws IOException if this stream is already closed.
*/
protected abstract void checkClosed() throws IOException;
/** Write one byte */
@Override
public synchronized void write(int b) throws IOException {
buf[count++] = (byte)b;
if(count == buf.length) {
flushBuffer();
}
}
/**
* Writes <code>len</code> bytes from the specified byte array
* starting at offset <code>off</code>, generating a checksum for
* each data chunk.
*
* <p> This method stores bytes from the given array into this
* stream's buffer before it gets checksummed. The buffer gets checksummed
* and flushed to the underlying output stream when all data
* in a checksum chunk are in the buffer. If the buffer is empty and the
* requested length is at least as large as the next checksum chunk,
* this method will checksum and write the chunk directly
* to the underlying output stream, avoiding an unnecessary data copy.
*
* @param b the data.
* @param off the start offset in the data.
* @param len the number of bytes to write.
* @exception IOException if an I/O error occurs.
*/
@Override
public synchronized void write(byte b[], int off, int len)
throws IOException {
checkClosed();
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
for (int n=0;n<len;n+=write1(b, off+n, len-n)) {
}
}
/**
* Write a portion of an array, flushing to the underlying
* stream at most once if necessary.
*/
private int write1(byte b[], int off, int len) throws IOException {
if(count==0 && len>=buf.length) {
// local buffer is empty and user buffer size >= local buffer size, so
// simply checksum the user buffer and send it directly to the underlying
// stream
final int length = buf.length;
writeChecksumChunks(b, off, length);
return length;
}
// copy user data to local buffer
int bytesToCopy = buf.length-count;
bytesToCopy = (len<bytesToCopy) ? len : bytesToCopy;
System.arraycopy(b, off, buf, count, bytesToCopy);
count += bytesToCopy;
if (count == buf.length) {
// local buffer is full
flushBuffer();
}
return bytesToCopy;
}
/* Forces any buffered output bytes to be checksummed and written out to
* the underlying output stream.
*/
protected synchronized void flushBuffer() throws IOException {
flushBuffer(false, true);
}
/* Forces buffered output bytes to be checksummed and written out to
* the underlying output stream. If there is a trailing partial chunk in the
* buffer,
* 1) flushPartial tells us whether to flush that chunk
* 2) if flushPartial is true, keep tells us whether to keep that chunk in the
* buffer (if flushPartial is false, it is always kept in the buffer)
*
* Returns the number of bytes that were flushed but are still left in the
* buffer (can only be non-zero if keep is true).
*/
protected synchronized int flushBuffer(boolean keep,
boolean flushPartial) throws IOException {
int bufLen = count;
int partialLen = bufLen % sum.getBytesPerChecksum();
int lenToFlush = flushPartial ? bufLen : bufLen - partialLen;
if (lenToFlush != 0) {
writeChecksumChunks(buf, 0, lenToFlush);
if (!flushPartial || keep) {
count = partialLen;
System.arraycopy(buf, bufLen - count, buf, 0, count);
} else {
count = 0;
}
}
// total bytes left minus unflushed bytes left
return count - (bufLen - lenToFlush);
}
/**
* Checksums all complete data chunks and flushes them to the underlying
* stream. If there is a trailing partial chunk, it is not flushed and is
* maintained in the buffer.
*/
public void flush() throws IOException {
flushBuffer(false, false);
}
/**
* Return the number of valid bytes currently in the buffer.
*/
protected synchronized int getBufferedDataSize() {
return count;
}
/** @return the size for a checksum. */
protected int getChecksumSize() {
return sum.getChecksumSize();
}
protected TraceScope createWriteTraceScope() {
return NullScope.INSTANCE;
}
/** Generate checksums for the given data chunks and output chunks & checksums
* to the underlying output stream.
*/
private void writeChecksumChunks(byte b[], int off, int len)
throws IOException {
sum.calculateChunkedSums(b, off, len, checksum, 0);
TraceScope scope = createWriteTraceScope();
try {
for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
writeChunk(b, off + i, chunkLen, checksum, ckOffset,
getChecksumSize());
}
} finally {
scope.close();
}
}
/**
* Converts a checksum integer value to a byte stream
*/
static public byte[] convertToByteStream(Checksum sum, int checksumSize) {
return int2byte((int)sum.getValue(), new byte[checksumSize]);
}
static byte[] int2byte(int integer, byte[] bytes) {
if (bytes.length != 0) {
bytes[0] = (byte) ((integer >>> 24) & 0xFF);
bytes[1] = (byte) ((integer >>> 16) & 0xFF);
bytes[2] = (byte) ((integer >>> 8) & 0xFF);
bytes[3] = (byte) ((integer >>> 0) & 0xFF);
return bytes;
}
return bytes;
}
/**
* Resets existing buffer with a new one of the specified size.
*/
protected synchronized void setChecksumBufSize(int size) {
this.buf = new byte[size];
this.checksum = new byte[sum.getChecksumSize(size)];
this.count = 0;
}
protected synchronized void resetChecksumBufSize() {
setChecksumBufSize(sum.getBytesPerChecksum() * BUFFER_NUM_CHUNKS);
}
}
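// --- Subclass sketch (illustrative; not part of the file above) ---
// A minimal concrete FSOutputSummer that collects data and checksums in
// memory, assuming DataChecksum.newDataChecksum(Type.CRC32, 512) as in Hadoop
// 2.x: every complete 512-byte chunk reaches writeChunk together with its
// 4-byte CRC.
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.fs.FSOutputSummer;
import org.apache.hadoop.util.DataChecksum;

class ChecksummedBuffer extends FSOutputSummer {
  private final ByteArrayOutputStream data = new ByteArrayOutputStream();
  private final ByteArrayOutputStream sums = new ByteArrayOutputStream();

  ChecksummedBuffer() {
    super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
  }

  @Override
  protected void writeChunk(byte[] b, int off, int len,
      byte[] checksum, int ckOff, int ckLen) throws IOException {
    data.write(b, off, len);            // the payload chunk (at most 512 bytes)
    sums.write(checksum, ckOff, ckLen); // its 4-byte CRC32
  }

  @Override
  protected void checkClosed() throws IOException {
    // This in-memory sketch is never closed, so writes are always accepted.
  }
}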
// File: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.util.LinkedList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.permission.ChmodParser;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.shell.CommandFactory;
import org.apache.hadoop.fs.shell.CommandFormat;
import org.apache.hadoop.fs.shell.FsCommand;
import org.apache.hadoop.fs.shell.PathData;
import org.apache.hadoop.util.Shell;
/**
* This class is the home for file permissions related commands.
* Moved to this separate class since FsShell is getting too large.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class FsShellPermissions extends FsCommand {
static Log LOG = FsShell.LOG;
/**
* Register the permission related commands with the factory
* @param factory the command factory
*/
public static void registerCommands(CommandFactory factory) {
factory.addClass(Chmod.class, "-chmod");
factory.addClass(Chown.class, "-chown");
factory.addClass(Chgrp.class, "-chgrp");
}
/**
* The pattern is almost as flexible as the mode allowed by the shell's chmod command.
* The main restriction is that we recognize only rwxXt. To reduce errors we
* also enforce octal mode specifications of either 3 digits without a sticky
* bit setting or four digits with a sticky bit setting.
*/
public static class Chmod extends FsShellPermissions {
public static final String NAME = "chmod";
public static final String USAGE = "[-R] <MODE[,MODE]... | OCTALMODE> PATH...";
public static final String DESCRIPTION =
"Changes permissions of a file. " +
"This works similar to the shell's chmod command with a few exceptions.\n" +
"-R: modifies the files recursively. This is the only option" +
" currently supported.\n" +
"<MODE>: Mode is the same as mode used for the shell's command. " +
"The only letters recognized are 'rwxXt', e.g. +t,a+r,g-w,+rwx,o=r.\n" +
"<OCTALMODE>: Mode specifed in 3 or 4 digits. If 4 digits, the first " +
"may be 1 or 0 to turn the sticky bit on or off, respectively. Unlike " +
"the shell command, it is not possible to specify only part of the " +
"mode, e.g. 754 is same as u=rwx,g=rx,o=r.\n\n" +
"If none of 'augo' is specified, 'a' is assumed and unlike the " +
"shell command, no umask is applied.";
protected ChmodParser pp;
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "R", null);
cf.parse(args);
setRecursive(cf.getOpt("R"));
String modeStr = args.removeFirst();
try {
pp = new ChmodParser(modeStr);
} catch (IllegalArgumentException iea) {
// TODO: remove "chmod : " so it's not doubled up in output, but it's
// here for backwards compatibility...
throw new IllegalArgumentException(
"chmod : mode '" + modeStr + "' does not match the expected pattern.");
}
}
@Override
protected void processPath(PathData item) throws IOException {
short newperms = pp.applyNewPermission(item.stat);
if (item.stat.getPermission().toShort() != newperms) {
try {
item.fs.setPermission(item.path, new FsPermission(newperms));
} catch (IOException e) {
LOG.debug("Error changing permissions of " + item, e);
throw new IOException(
"changing permissions of '" + item + "': " + e.getMessage());
}
}
}
}
// used by chown/chgrp
private static final String allowedChars = Shell.WINDOWS ? "[-_./@a-zA-Z0-9 ]" :
"[-_./@a-zA-Z0-9]";
/**
* Used to change owner and/or group of files
*/
public static class Chown extends FsShellPermissions {
public static final String NAME = "chown";
public static final String USAGE = "[-R] [OWNER][:[GROUP]] PATH...";
public static final String DESCRIPTION =
"Changes owner and group of a file. " +
"This is similar to the shell's chown command with a few exceptions.\n" +
"-R: modifies the files recursively. This is the only option " +
"currently supported.\n\n" +
"If only the owner or group is specified, then only the owner or " +
"group is modified. " +
"The owner and group names may only consist of digits, alphabet, "+
"and any of " + allowedChars + ". The names are case sensitive.\n\n" +
"WARNING: Avoid using '.' to separate user name and group though " +
"Linux allows it. If user names have dots in them and you are " +
"using local file system, you might see surprising results since " +
"the shell command 'chown' is used for local files.";
// Allows only "allowedChars" above in names for owner and group.
static private final Pattern chownPattern = Pattern.compile(
"^\\s*(" + allowedChars + "+)?([:](" + allowedChars + "*))?\\s*$");
protected String owner = null;
protected String group = null;
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "R");
cf.parse(args);
setRecursive(cf.getOpt("R"));
parseOwnerGroup(args.removeFirst());
}
/**
* Parse the first argument into an owner and group
* @param ownerStr string describing new ownership
*/
protected void parseOwnerGroup(String ownerStr) {
Matcher matcher = chownPattern.matcher(ownerStr);
if (!matcher.matches()) {
throw new IllegalArgumentException(
"'" + ownerStr + "' does not match expected pattern for [owner][:group].");
}
owner = matcher.group(1);
group = matcher.group(3);
if (group != null && group.length() == 0) {
group = null;
}
if (owner == null && group == null) {
throw new IllegalArgumentException(
"'" + ownerStr + "' does not specify owner or group.");
}
}
@Override
protected void processPath(PathData item) throws IOException {
//Should we do case insensitive match?
String newOwner = (owner == null || owner.equals(item.stat.getOwner())) ?
null : owner;
String newGroup = (group == null || group.equals(item.stat.getGroup())) ?
null : group;
if (newOwner != null || newGroup != null) {
try {
item.fs.setOwner(item.path, newOwner, newGroup);
} catch (IOException e) {
LOG.debug("Error changing ownership of " + item, e);
throw new IOException(
"changing ownership of '" + item + "': " + e.getMessage());
}
}
}
}
/**
* Used to change group of files
*/
public static class Chgrp extends Chown {
public static final String NAME = "chgrp";
public static final String USAGE = "[-R] GROUP PATH...";
public static final String DESCRIPTION =
"This is equivalent to -chown ... :GROUP ...";
static private final Pattern chgrpPattern =
Pattern.compile("^\\s*(" + allowedChars + "+)\\s*$");
@Override
protected void parseOwnerGroup(String groupStr) {
Matcher matcher = chgrpPattern.matcher(groupStr);
if (!matcher.matches()) {
throw new IllegalArgumentException(
"'" + groupStr + "' does not match expected pattern for group");
}
owner = null;
group = matcher.group(1);
}
}
}
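// --- Usage sketch (illustrative; not part of the file above) ---
// The commands registered above are reachable through FsShell, so the chmod
// grammar described in Chmod.DESCRIPTION can be exercised programmatically.
// The path is a placeholder.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

class ChmodExample {
  public static void main(String[] args) throws Exception {
    FsShell shell = new FsShell(new Configuration());
    // Equivalent to the CLI invocation: hadoop fs -chmod -R u+rwx,g-w /tmp/dir
    int rc = ToolRunner.run(shell,
        new String[] {"-chmod", "-R", "u+rwx,g-w", "/tmp/dir"});
    System.exit(rc);
  }
}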
// File: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CanSetReadahead.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface CanSetReadahead {
/**
* Set the readahead on this stream.
*
* @param readahead The readahead to use. null means to use the default.
* @throws IOException If there was an error changing the readahead
* setting.
* @throws UnsupportedOperationException If this stream doesn't support
* setting readahead.
*/
public void setReadahead(Long readahead)
throws IOException, UnsupportedOperationException;
}
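// --- Usage sketch (illustrative; not part of the file above) ---
// Raising readahead on a stream that supports it (FSDataInputStream does in
// recent Hadoop releases); passing null instead restores the default. The
// path is a placeholder.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class ReadaheadExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataInputStream in = fs.open(new Path("/tmp/seq-scan.dat"))) {
      in.setReadahead(4L * 1024 * 1024); // request ~4 MB of readahead
      in.read(new byte[8192]);
    }
  }
}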
// File: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java (repo: hadoop)
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.http.lib.StaticUserWebFilter;
/**
* This class contains constants for configuration keys used
* in the common code.
*
* It inherits all the publicly documented configuration keys
* and adds unsupported keys.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
/** Default location for user home directories */
public static final String FS_HOME_DIR_KEY = "fs.homeDir";
/** Default value for FS_HOME_DIR_KEY */
public static final String FS_HOME_DIR_DEFAULT = "/user";
/** Default umask for files created in HDFS */
public static final String FS_PERMISSIONS_UMASK_KEY =
"fs.permissions.umask-mode";
/** Default value for FS_PERMISSIONS_UMASK_KEY */
public static final int FS_PERMISSIONS_UMASK_DEFAULT = 0022;
  /** How often the RPC client sends pings to the RPC server */
public static final String IPC_PING_INTERVAL_KEY = "ipc.ping.interval";
/** Default value for IPC_PING_INTERVAL_KEY */
public static final int IPC_PING_INTERVAL_DEFAULT = 60000; // 1 min
/** Enables pings from RPC client to the server */
public static final String IPC_CLIENT_PING_KEY = "ipc.client.ping";
/** Default value of IPC_CLIENT_PING_KEY */
public static final boolean IPC_CLIENT_PING_DEFAULT = true;
/** Responses larger than this will be logged */
public static final String IPC_SERVER_RPC_MAX_RESPONSE_SIZE_KEY =
"ipc.server.max.response.size";
/** Default value for IPC_SERVER_RPC_MAX_RESPONSE_SIZE_KEY */
public static final int IPC_SERVER_RPC_MAX_RESPONSE_SIZE_DEFAULT =
1024*1024;
/** Number of threads in RPC server reading from the socket */
public static final String IPC_SERVER_RPC_READ_THREADS_KEY =
"ipc.server.read.threadpool.size";
/** Default value for IPC_SERVER_RPC_READ_THREADS_KEY */
public static final int IPC_SERVER_RPC_READ_THREADS_DEFAULT = 1;
/** Number of pending connections that may be queued per socket reader */
public static final String IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_KEY =
"ipc.server.read.connection-queue.size";
  /** Default value for IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_KEY */
public static final int IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_DEFAULT =
100;
public static final String IPC_MAXIMUM_DATA_LENGTH =
"ipc.maximum.data.length";
public static final int IPC_MAXIMUM_DATA_LENGTH_DEFAULT = 64 * 1024 * 1024;
/** How many calls per handler are allowed in the queue. */
public static final String IPC_SERVER_HANDLER_QUEUE_SIZE_KEY =
"ipc.server.handler.queue.size";
/** Default value for IPC_SERVER_HANDLER_QUEUE_SIZE_KEY */
public static final int IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT = 100;
/**
* CallQueue related settings. These are not used directly, but rather
* combined with a namespace and port. For instance:
* IPC_CALLQUEUE_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY
*/
public static final String IPC_CALLQUEUE_NAMESPACE = "ipc";
public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";
public static final String IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY = "identity-provider.impl";
public static final String IPC_BACKOFF_ENABLE = "backoff.enable";
public static final boolean IPC_BACKOFF_ENABLE_DEFAULT = false;
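  // Composition sketch (illustrative, not part of the original source): the
  // call-queue implementation for an RPC server listening on port 8020 is
  // read from the composed key
  //
  //   IPC_CALLQUEUE_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY
  //   // -> "ipc.8020.callqueue.impl"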
/** This is for specifying the implementation for the mappings from
* hostnames to the racks they belong to
*/
public static final String NET_TOPOLOGY_CONFIGURED_NODE_MAPPING_KEY =
"net.topology.configured.node.mapping";
/**
* Supported compression codec classes
*/
public static final String IO_COMPRESSION_CODECS_KEY = "io.compression.codecs";
/** Internal buffer size for Lzo compressor/decompressors */
public static final String IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY =
"io.compression.codec.lzo.buffersize";
/** Default value for IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY */
public static final int IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT =
64*1024;
/** Internal buffer size for Snappy compressor/decompressors */
public static final String IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY =
"io.compression.codec.snappy.buffersize";
/** Default value for IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY */
public static final int IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT =
256 * 1024;
/** Internal buffer size for Lz4 compressor/decompressors */
public static final String IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY =
"io.compression.codec.lz4.buffersize";
  /** Default value for IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY */
public static final int IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT =
256 * 1024;
  /** Use lz4hc (slow but with high compression ratio) for lz4 compression */
  public static final String IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY =
      "io.compression.codec.lz4.use.lz4hc";
  /** Default value for IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY */
public static final boolean IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT =
false;
/**
* Service Authorization
*/
public static final String
HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL =
"security.service.authorization.default.acl";
public static final String
HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_BLOCKED_ACL =
"security.service.authorization.default.acl.blocked";
public static final String
HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_POLICY =
"security.refresh.policy.protocol.acl";
public static final String
HADOOP_SECURITY_SERVICE_AUTHORIZATION_GET_USER_MAPPINGS =
"security.get.user.mappings.protocol.acl";
public static final String
HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_USER_MAPPINGS =
"security.refresh.user.mappings.protocol.acl";
public static final String
HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_CALLQUEUE =
"security.refresh.callqueue.protocol.acl";
public static final String
HADOOP_SECURITY_SERVICE_AUTHORIZATION_GENERIC_REFRESH =
"security.refresh.generic.protocol.acl";
public static final String
HADOOP_SECURITY_SERVICE_AUTHORIZATION_TRACING =
"security.trace.protocol.acl";
public static final String
SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
public static final String
SECURITY_ZKFC_PROTOCOL_ACL = "security.zkfc.protocol.acl";
public static final String
SECURITY_CLIENT_PROTOCOL_ACL = "security.client.protocol.acl";
public static final String SECURITY_CLIENT_DATANODE_PROTOCOL_ACL =
"security.client.datanode.protocol.acl";
public static final String
SECURITY_DATANODE_PROTOCOL_ACL = "security.datanode.protocol.acl";
public static final String
SECURITY_INTER_DATANODE_PROTOCOL_ACL = "security.inter.datanode.protocol.acl";
public static final String
SECURITY_NAMENODE_PROTOCOL_ACL = "security.namenode.protocol.acl";
public static final String SECURITY_QJOURNAL_SERVICE_PROTOCOL_ACL =
"security.qjournal.service.protocol.acl";
public static final String HADOOP_SECURITY_TOKEN_SERVICE_USE_IP =
"hadoop.security.token.service.use_ip";
public static final boolean HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT =
true;
/**
* HA health monitor and failover controller.
*/
/** How often to retry connecting to the service. */
public static final String HA_HM_CONNECT_RETRY_INTERVAL_KEY =
"ha.health-monitor.connect-retry-interval.ms";
public static final long HA_HM_CONNECT_RETRY_INTERVAL_DEFAULT = 1000;
/* How often to check the service. */
public static final String HA_HM_CHECK_INTERVAL_KEY =
"ha.health-monitor.check-interval.ms";
public static final long HA_HM_CHECK_INTERVAL_DEFAULT = 1000;
/* How long to sleep after an unexpected RPC error. */
public static final String HA_HM_SLEEP_AFTER_DISCONNECT_KEY =
"ha.health-monitor.sleep-after-disconnect.ms";
public static final long HA_HM_SLEEP_AFTER_DISCONNECT_DEFAULT = 1000;
/* Timeout for the actual monitorHealth() calls. */
public static final String HA_HM_RPC_TIMEOUT_KEY =
"ha.health-monitor.rpc-timeout.ms";
public static final int HA_HM_RPC_TIMEOUT_DEFAULT = 45000;
/* Timeout that the FC waits for the new active to become active */
public static final String HA_FC_NEW_ACTIVE_TIMEOUT_KEY =
"ha.failover-controller.new-active.rpc-timeout.ms";
public static final int HA_FC_NEW_ACTIVE_TIMEOUT_DEFAULT = 60000;
/* Timeout that the FC waits for the old active to go to standby */
public static final String HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY =
"ha.failover-controller.graceful-fence.rpc-timeout.ms";
public static final int HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT = 5000;
/* FC connection retries for graceful fencing */
public static final String HA_FC_GRACEFUL_FENCE_CONNECTION_RETRIES =
"ha.failover-controller.graceful-fence.connection.retries";
public static final int HA_FC_GRACEFUL_FENCE_CONNECTION_RETRIES_DEFAULT = 1;
  /** Number of times to retry ZooKeeper operations in ActiveStandbyElector */
public static final String HA_FC_ELECTOR_ZK_OP_RETRIES_KEY =
"ha.failover-controller.active-standby-elector.zk.op.retries";
public static final int HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT = 3;
/* Timeout that the CLI (manual) FC waits for monitorHealth, getServiceState */
public static final String HA_FC_CLI_CHECK_TIMEOUT_KEY =
"ha.failover-controller.cli-check.rpc-timeout.ms";
public static final int HA_FC_CLI_CHECK_TIMEOUT_DEFAULT = 20000;
/** Static user web-filter properties.
* See {@link StaticUserWebFilter}.
*/
public static final String HADOOP_HTTP_STATIC_USER =
"hadoop.http.staticuser.user";
public static final String DEFAULT_HADOOP_HTTP_STATIC_USER =
"dr.who";
/**
* User->groups static mapping to override the groups lookup
*/
public static final String HADOOP_USER_GROUP_STATIC_OVERRIDES =
"hadoop.user.group.static.mapping.overrides";
public static final String HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT =
"dr.who=;";
/** Enable/Disable aliases serving from jetty */
public static final String HADOOP_JETTY_LOGS_SERVE_ALIASES =
"hadoop.jetty.logs.serve.aliases";
public static final boolean DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES =
true;
/* Path to the Kerberos ticket cache. Setting this will force
* UserGroupInformation to use only this ticket cache file when creating a
* FileSystem instance.
*/
public static final String KERBEROS_TICKET_CACHE_PATH =
"hadoop.security.kerberos.ticket.cache.path";
public static final String HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_KEY =
"hadoop.security.uid.cache.secs";
public static final long HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT =
4*60*60; // 4 hours
public static final String IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY = "ipc.client.fallback-to-simple-auth-allowed";
public static final boolean IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT = false;
public static final String IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY =
"ipc.client.connect.max.retries.on.sasl";
public static final int IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT = 5;
/** How often the server scans for idle connections */
public static final String IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY =
"ipc.client.connection.idle-scan-interval.ms";
  /** Default value for IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY */
public static final int IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_DEFAULT =
10000;
public static final String HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS =
"hadoop.user.group.metrics.percentiles.intervals";
public static final String RPC_METRICS_QUANTILE_ENABLE =
"rpc.metrics.quantile.enable";
public static final boolean RPC_METRICS_QUANTILE_ENABLE_DEFAULT = false;
public static final String RPC_METRICS_PERCENTILES_INTERVALS_KEY =
"rpc.metrics.percentiles.intervals";
/** Allowed hosts for nfs exports */
public static final String NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY = "nfs.exports.allowed.hosts";
public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
}
| 13,277 | 43.408027 | 124 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32GzipFileChecksum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.util.DataChecksum;
/** For CRC32 with the Gzip polynomial */
public class MD5MD5CRC32GzipFileChecksum extends MD5MD5CRC32FileChecksum {
/** Same as this(0, 0, null) */
public MD5MD5CRC32GzipFileChecksum() {
this(0, 0, null);
}
/** Create a MD5FileChecksum */
public MD5MD5CRC32GzipFileChecksum(int bytesPerCRC, long crcPerBlock, MD5Hash md5) {
super(bytesPerCRC, crcPerBlock, md5);
}
@Override
public DataChecksum.Type getCrcType() {
// default to the one that is understood by all releases.
return DataChecksum.Type.CRC32;
}
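  // Construction sketch (illustrative, not part of the original source):
  //   MD5Hash md5 = MD5Hash.digest("some file contents".getBytes());
  //   FileChecksum sum = new MD5MD5CRC32GzipFileChecksum(512, 8, md5);
  //   sum.getCrcType();  // DataChecksum.Type.CRC32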
}
| 1,465 | 34.756098 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathIsNotEmptyDirectoryException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
/** Generated by rm commands */
public class PathIsNotEmptyDirectoryException extends PathExistsException {
/** @param path for the exception */
public PathIsNotEmptyDirectoryException(String path) {
super(path, "Directory is not empty");
}
}
| 1,090 | 40.961538 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.codec.binary.Hex;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Preconditions;
/**
 * The value of an <code>XAttr</code> is a byte[]. This class converts a
 * byte[] to a string representation and back. The string representation is
 * convenient for display and input: for example, it can be shown on screen
 * as a shell or JSON response, and supplied as an HTTP or shell parameter.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public enum XAttrCodec {
/**
* Value encoded as text
* string is enclosed in double quotes (\").
*/
TEXT,
/**
* Value encoded as hexadecimal string
* is prefixed with 0x.
*/
HEX,
/**
* Value encoded as base64 string
* is prefixed with 0s.
*/
BASE64;
private static final String HEX_PREFIX = "0x";
private static final String BASE64_PREFIX = "0s";
private static final Base64 base64 = new Base64(0);
/**
* Decode string representation of a value and check whether it's
* encoded. If the given string begins with 0x or 0X, it expresses
* a hexadecimal number. If the given string begins with 0s or 0S,
* base64 encoding is expected. If the given string is enclosed in
* double quotes, the inner string is treated as text. Otherwise
* the given string is treated as text.
* @param value string representation of the value.
* @return byte[] the value
* @throws IOException
*/
public static byte[] decodeValue(String value) throws IOException {
byte[] result = null;
if (value != null) {
if (value.length() >= 2) {
String en = value.substring(0, 2);
if (value.startsWith("\"") && value.endsWith("\"")) {
value = value.substring(1, value.length()-1);
result = value.getBytes("utf-8");
} else if (en.equalsIgnoreCase(HEX_PREFIX)) {
value = value.substring(2, value.length());
try {
result = Hex.decodeHex(value.toCharArray());
} catch (DecoderException e) {
throw new IOException(e);
}
} else if (en.equalsIgnoreCase(BASE64_PREFIX)) {
value = value.substring(2, value.length());
result = base64.decode(value);
}
}
if (result == null) {
result = value.getBytes("utf-8");
}
}
return result;
}
/**
* Encode byte[] value to string representation with encoding.
* Values encoded as text strings are enclosed in double quotes (\"),
* while strings encoded as hexadecimal and base64 are prefixed with
* 0x and 0s, respectively.
* @param value byte[] value
   * @param encoding the encoding to use: TEXT, HEX or BASE64
* @return String string representation of value
* @throws IOException
*/
public static String encodeValue(byte[] value, XAttrCodec encoding)
throws IOException {
Preconditions.checkNotNull(value, "Value can not be null.");
if (encoding == HEX) {
return HEX_PREFIX + Hex.encodeHexString(value);
} else if (encoding == BASE64) {
return BASE64_PREFIX + base64.encodeToString(value);
} else {
return "\"" + new String(value, "utf-8") + "\"";
}
}
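  // Round-trip sketch (illustrative, not part of the original source):
  //   byte[] raw = "value".getBytes("utf-8");
  //   XAttrCodec.encodeValue(raw, XAttrCodec.HEX);   // "0x76616c7565"
  //   XAttrCodec.encodeValue(raw, XAttrCodec.TEXT);  // "\"value\""
  //   XAttrCodec.decodeValue("0x76616c7565");        // the original bytes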
}
| 4,221 | 33.606557 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.io.Writable;
/** An abstract class representing file checksums. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class FileChecksum implements Writable {
/** The checksum algorithm name */
public abstract String getAlgorithmName();
/** The length of the checksum in bytes */
public abstract int getLength();
/** The value of the checksum in bytes */
public abstract byte[] getBytes();
public ChecksumOpt getChecksumOpt() {
return null;
}
/** Return true if both the algorithms and the values are the same. */
@Override
public boolean equals(Object other) {
if (other == this) {
return true;
}
if (other == null || !(other instanceof FileChecksum)) {
return false;
}
final FileChecksum that = (FileChecksum)other;
return this.getAlgorithmName().equals(that.getAlgorithmName())
&& Arrays.equals(this.getBytes(), that.getBytes());
}
@Override
public int hashCode() {
return getAlgorithmName().hashCode() ^ Arrays.hashCode(getBytes());
}
}
| 2,112 | 32.539683 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathIsDirectoryException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
/** EISDIR */
public class PathIsDirectoryException extends PathExistsException {
static final long serialVersionUID = 0L;
/** @param path for the exception */
public PathIsDirectoryException(String path) {
super(path, "Is a directory");
}
}
| 1,091 | 39.444444 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A class for POSIX glob pattern with brace expansions.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class GlobPattern {
private static final char BACKSLASH = '\\';
private Pattern compiled;
private boolean hasWildcard = false;
/**
* Construct the glob pattern object with a glob pattern string
* @param globPattern the glob pattern string
*/
public GlobPattern(String globPattern) {
set(globPattern);
}
/**
* @return the compiled pattern
*/
public Pattern compiled() {
return compiled;
}
/**
* Compile glob pattern string
* @param globPattern the glob pattern
* @return the pattern object
*/
public static Pattern compile(String globPattern) {
return new GlobPattern(globPattern).compiled();
}
/**
* Match input against the compiled glob pattern
* @param s input chars
* @return true for successful matches
*/
public boolean matches(CharSequence s) {
return compiled.matcher(s).matches();
}
/**
* Set and compile a glob pattern
* @param glob the glob pattern string
*/
public void set(String glob) {
StringBuilder regex = new StringBuilder();
int setOpen = 0;
int curlyOpen = 0;
int len = glob.length();
hasWildcard = false;
for (int i = 0; i < len; i++) {
char c = glob.charAt(i);
switch (c) {
case BACKSLASH:
if (++i >= len) {
error("Missing escaped character", glob, i);
}
regex.append(c).append(glob.charAt(i));
continue;
case '.':
case '$':
case '(':
case ')':
case '|':
case '+':
// escape regex special chars that are not glob special chars
regex.append(BACKSLASH);
break;
case '*':
regex.append('.');
hasWildcard = true;
break;
case '?':
regex.append('.');
hasWildcard = true;
continue;
case '{': // start of a group
regex.append("(?:"); // non-capturing
curlyOpen++;
hasWildcard = true;
continue;
case ',':
regex.append(curlyOpen > 0 ? '|' : c);
continue;
case '}':
if (curlyOpen > 0) {
// end of a group
curlyOpen--;
regex.append(")");
continue;
}
break;
case '[':
if (setOpen > 0) {
error("Unclosed character class", glob, i);
}
setOpen++;
hasWildcard = true;
break;
case '^': // ^ inside [...] can be unescaped
if (setOpen == 0) {
regex.append(BACKSLASH);
}
break;
case '!': // [! needs to be translated to [^
regex.append(setOpen > 0 && '[' == glob.charAt(i - 1) ? '^' : '!');
continue;
case ']':
// Many set errors like [][] could not be easily detected here,
// as []], []-] and [-] are all valid POSIX glob and java regex.
// We'll just let the regex compiler do the real work.
setOpen = 0;
break;
default:
}
regex.append(c);
}
if (setOpen > 0) {
error("Unclosed character class", glob, len);
}
if (curlyOpen > 0) {
error("Unclosed group", glob, len);
}
compiled = Pattern.compile(regex.toString());
}
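  // Translation sketch (illustrative, not part of the original source): the
  // glob "201?-{01,02}-*" compiles to the regex "201.-(?:01|02)-.*", so
  //
  //   new GlobPattern("201?-{01,02}-*").matches("2014-02-visits")  // true
  //   new GlobPattern("201?-{01,02}-*").hasWildcard()              // true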
/**
* @return true if this is a wildcard pattern (with special chars)
*/
public boolean hasWildcard() {
return hasWildcard;
}
private static void error(String message, String pattern, int pos) {
throw new PatternSyntaxException(message, pattern, pos);
}
}
| 4,750 | 26.947059 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ParentNotDirectoryException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Indicates that the parent of specified Path is not a directory
* as expected.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ParentNotDirectoryException extends IOException {
private static final long serialVersionUID = 1L;
public ParentNotDirectoryException() {
super();
}
public ParentNotDirectoryException(String msg) {
super(msg);
}
}
| 1,377 | 31.809524 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.StringUtils;
/** Store the summary of a content (a directory or a file). */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ContentSummary implements Writable{
private long length;
private long fileCount;
private long directoryCount;
private long quota;
private long spaceConsumed;
private long spaceQuota;
private long typeConsumed[];
private long typeQuota[];
public static class Builder{
public Builder() {
this.quota = -1;
this.spaceQuota = -1;
typeConsumed = new long[StorageType.values().length];
typeQuota = new long[StorageType.values().length];
for (int i = 0; i < typeQuota.length; i++) {
typeQuota[i] = -1;
}
}
public Builder length(long length) {
this.length = length;
return this;
}
public Builder fileCount(long fileCount) {
this.fileCount = fileCount;
return this;
}
public Builder directoryCount(long directoryCount) {
this.directoryCount = directoryCount;
return this;
}
public Builder quota(long quota){
this.quota = quota;
return this;
}
public Builder spaceConsumed(long spaceConsumed) {
this.spaceConsumed = spaceConsumed;
return this;
}
public Builder spaceQuota(long spaceQuota) {
this.spaceQuota = spaceQuota;
return this;
}
public Builder typeConsumed(long typeConsumed[]) {
for (int i = 0; i < typeConsumed.length; i++) {
this.typeConsumed[i] = typeConsumed[i];
}
return this;
}
public Builder typeQuota(StorageType type, long quota) {
this.typeQuota[type.ordinal()] = quota;
return this;
}
public Builder typeConsumed(StorageType type, long consumed) {
this.typeConsumed[type.ordinal()] = consumed;
return this;
}
public Builder typeQuota(long typeQuota[]) {
for (int i = 0; i < typeQuota.length; i++) {
this.typeQuota[i] = typeQuota[i];
}
return this;
}
public ContentSummary build() {
return new ContentSummary(length, fileCount, directoryCount, quota,
spaceConsumed, spaceQuota, typeConsumed, typeQuota);
}
private long length;
private long fileCount;
private long directoryCount;
private long quota;
private long spaceConsumed;
private long spaceQuota;
private long typeConsumed[];
private long typeQuota[];
}
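  // Builder sketch (illustrative, not part of the original source):
  //   ContentSummary cs = new ContentSummary.Builder()
  //       .length(4096).fileCount(2).directoryCount(1)
  //       .spaceConsumed(12288)
  //       .build();
  //   cs.getSpaceConsumed();  // 12288; quotas default to -1 (unset)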
  /** Constructor deprecated by ContentSummary.Builder */
  @Deprecated
  public ContentSummary() {}
  /** Constructor, deprecated by ContentSummary.Builder.
   * This constructor implicitly sets spaceConsumed to the same value as
   * length; spaceConsumed and length must be set explicitly with
   * ContentSummary.Builder.
   */
@Deprecated
public ContentSummary(long length, long fileCount, long directoryCount) {
this(length, fileCount, directoryCount, -1L, length, -1L);
}
/** Constructor, deprecated by ContentSummary.Builder */
@Deprecated
public ContentSummary(
long length, long fileCount, long directoryCount, long quota,
long spaceConsumed, long spaceQuota) {
this.length = length;
this.fileCount = fileCount;
this.directoryCount = directoryCount;
this.quota = quota;
this.spaceConsumed = spaceConsumed;
this.spaceQuota = spaceQuota;
}
  /** Constructor for ContentSummary.Builder */
private ContentSummary(
long length, long fileCount, long directoryCount, long quota,
long spaceConsumed, long spaceQuota, long typeConsumed[],
long typeQuota[]) {
this.length = length;
this.fileCount = fileCount;
this.directoryCount = directoryCount;
this.quota = quota;
this.spaceConsumed = spaceConsumed;
this.spaceQuota = spaceQuota;
this.typeConsumed = typeConsumed;
this.typeQuota = typeQuota;
}
/** @return the length */
public long getLength() {return length;}
/** @return the directory count */
public long getDirectoryCount() {return directoryCount;}
/** @return the file count */
public long getFileCount() {return fileCount;}
/** Return the directory quota */
public long getQuota() {return quota;}
  /** Returns storage space consumed */
public long getSpaceConsumed() {return spaceConsumed;}
/** Returns storage space quota */
public long getSpaceQuota() {return spaceQuota;}
/** Returns storage type quota */
public long getTypeQuota(StorageType type) {
return (typeQuota != null) ? typeQuota[type.ordinal()] : -1;
}
  /** Returns storage type consumed */
public long getTypeConsumed(StorageType type) {
return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0;
}
  /** Returns true if any storage type quota has been set */
public boolean isTypeQuotaSet() {
if (typeQuota == null) {
return false;
}
for (StorageType t : StorageType.getTypesSupportingQuota()) {
if (typeQuota[t.ordinal()] > 0) {
return true;
}
}
return false;
}
  /** Returns true if any storage type consumption information is available */
public boolean isTypeConsumedAvailable() {
if (typeConsumed == null) {
return false;
}
for (StorageType t : StorageType.getTypesSupportingQuota()) {
if (typeConsumed[t.ordinal()] > 0) {
return true;
}
}
return false;
}
@Override
@InterfaceAudience.Private
public void write(DataOutput out) throws IOException {
out.writeLong(length);
out.writeLong(fileCount);
out.writeLong(directoryCount);
out.writeLong(quota);
out.writeLong(spaceConsumed);
out.writeLong(spaceQuota);
}
@Override
@InterfaceAudience.Private
public void readFields(DataInput in) throws IOException {
this.length = in.readLong();
this.fileCount = in.readLong();
this.directoryCount = in.readLong();
this.quota = in.readLong();
this.spaceConsumed = in.readLong();
this.spaceQuota = in.readLong();
}
/**
* Output format:
* <----12----> <----12----> <-------18------->
* DIR_COUNT FILE_COUNT CONTENT_SIZE
*/
private static final String SUMMARY_FORMAT = "%12s %12s %18s ";
/**
* Output format:
* <----12----> <------15-----> <------15-----> <------15----->
* QUOTA REM_QUOTA SPACE_QUOTA REM_SPACE_QUOTA
* <----12----> <----12----> <-------18------->
* DIR_COUNT FILE_COUNT CONTENT_SIZE
*/
private static final String QUOTA_SUMMARY_FORMAT = "%12s %15s ";
private static final String SPACE_QUOTA_SUMMARY_FORMAT = "%15s %15s ";
private static final String STORAGE_TYPE_SUMMARY_FORMAT = "%13s %17s ";
private static final String[] HEADER_FIELDS = new String[] { "DIR_COUNT",
"FILE_COUNT", "CONTENT_SIZE"};
private static final String[] QUOTA_HEADER_FIELDS = new String[] { "QUOTA",
"REM_QUOTA", "SPACE_QUOTA", "REM_SPACE_QUOTA" };
/** The header string */
private static final String HEADER = String.format(
SUMMARY_FORMAT, (Object[]) HEADER_FIELDS);
private static final String QUOTA_HEADER = String.format(
QUOTA_SUMMARY_FORMAT + SPACE_QUOTA_SUMMARY_FORMAT,
(Object[]) QUOTA_HEADER_FIELDS) +
HEADER;
/** default quota display string */
private static final String QUOTA_NONE = "none";
private static final String QUOTA_INF = "inf";
/** Return the header of the output.
* if qOption is false, output directory count, file count, and content size;
* if qOption is true, output quota and remaining quota as well.
*
* @param qOption a flag indicating if quota needs to be printed or not
* @return the header of the output
*/
public static String getHeader(boolean qOption) {
return qOption ? QUOTA_HEADER : HEADER;
}
  /**
   * Return the header for the given storage types.
   *
   * @param storageTypes storage types to include in the header
   * @return storage header string
   */
public static String getStorageTypeHeader(List<StorageType> storageTypes) {
StringBuffer header = new StringBuffer();
for (StorageType st : storageTypes) {
/* the field length is 13/17 for quota and remain quota
* as the max length for quota name is ARCHIVE_QUOTA
* and remain quota name REM_ARCHIVE_QUOTA */
String storageName = st.toString();
header.append(String.format(STORAGE_TYPE_SUMMARY_FORMAT, storageName + "_QUOTA",
"REM_" + storageName + "_QUOTA"));
}
return header.toString();
}
/**
* Returns the names of the fields from the summary header.
*
* @return names of fields as displayed in the header
*/
public static String[] getHeaderFields() {
return HEADER_FIELDS;
}
/**
* Returns the names of the fields used in the quota summary.
*
* @return names of quota fields as displayed in the header
*/
public static String[] getQuotaHeaderFields() {
return QUOTA_HEADER_FIELDS;
}
@Override
public String toString() {
return toString(true);
}
/** Return the string representation of the object in the output format.
* if qOption is false, output directory count, file count, and content size;
* if qOption is true, output quota and remaining quota as well.
*
* @param qOption a flag indicating if quota needs to be printed or not
* @return the string representation of the object
*/
public String toString(boolean qOption) {
return toString(qOption, false);
}
/** Return the string representation of the object in the output format.
* if qOption is false, output directory count, file count, and content size;
* if qOption is true, output quota and remaining quota as well.
   * if hOption is false, file sizes are returned in bytes;
   * if hOption is true, file sizes are returned in human readable form.
   *
   * @param qOption a flag indicating if quota needs to be printed or not
   * @param hOption a flag indicating if human readable output is to be used
* @return the string representation of the object
*/
public String toString(boolean qOption, boolean hOption) {
return toString(qOption, hOption, false, null);
}
/**
* Return the string representation of the object in the output format.
   * if tOption is true, display the quota by storage types;
   * otherwise, use the same logic as #toString(boolean,boolean).
   *
   * @param qOption a flag indicating if quota needs to be printed or not
   * @param hOption a flag indicating if human readable output is to be used
   * @param tOption a flag indicating whether to display quota by storage types
* @param types Storage types to display
* @return the string representation of the object
*/
public String toString(boolean qOption, boolean hOption,
boolean tOption, List<StorageType> types) {
String prefix = "";
if (tOption) {
StringBuffer content = new StringBuffer();
for (StorageType st : types) {
long typeQuota = getTypeQuota(st);
long typeConsumed = getTypeConsumed(st);
String quotaStr = QUOTA_NONE;
String quotaRem = QUOTA_INF;
if (typeQuota > 0) {
quotaStr = formatSize(typeQuota, hOption);
quotaRem = formatSize(typeQuota - typeConsumed, hOption);
}
content.append(String.format(STORAGE_TYPE_SUMMARY_FORMAT,
quotaStr, quotaRem));
}
return content.toString();
}
if (qOption) {
String quotaStr = QUOTA_NONE;
String quotaRem = QUOTA_INF;
String spaceQuotaStr = QUOTA_NONE;
String spaceQuotaRem = QUOTA_INF;
if (quota>0) {
quotaStr = formatSize(quota, hOption);
quotaRem = formatSize(quota-(directoryCount+fileCount), hOption);
}
if (spaceQuota>0) {
spaceQuotaStr = formatSize(spaceQuota, hOption);
spaceQuotaRem = formatSize(spaceQuota - spaceConsumed, hOption);
}
prefix = String.format(QUOTA_SUMMARY_FORMAT + SPACE_QUOTA_SUMMARY_FORMAT,
quotaStr, quotaRem, spaceQuotaStr, spaceQuotaRem);
}
return prefix + String.format(SUMMARY_FORMAT,
formatSize(directoryCount, hOption),
formatSize(fileCount, hOption),
formatSize(length, hOption));
}
/**
* Formats a size to be human readable or in bytes
* @param size value to be formatted
* @param humanReadable flag indicating human readable or not
* @return String representation of the size
*/
private String formatSize(long size, boolean humanReadable) {
return humanReadable
? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
: String.valueOf(size);
}
}
| 13,662 | 30.922897 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Implementers of this interface provide a read API that writes to a
* ByteBuffer, not a byte[].
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface ByteBufferReadable {
/**
* Reads up to buf.remaining() bytes into buf. Callers should use
* buf.limit(..) to control the size of the desired read.
* <p/>
* After a successful call, buf.position() will be advanced by the number
* of bytes read and buf.limit() should be unchanged.
* <p/>
* In the case of an exception, the values of buf.position() and buf.limit()
* are undefined, and callers should be prepared to recover from this
* eventuality.
* <p/>
* Many implementations will throw {@link UnsupportedOperationException}, so
* callers that are not confident in support for this method from the
* underlying filesystem should be prepared to handle that exception.
* <p/>
* Implementations should treat 0-length requests as legitimate, and must not
* signal an error upon their receipt.
*
* @param buf
* the ByteBuffer to receive the results of the read operation.
* @return the number of bytes read, possibly zero, or -1 if
* reach end-of-stream
* @throws IOException
* if there is some error performing the read
*/
public int read(ByteBuffer buf) throws IOException;
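
  // Read-loop sketch (illustrative, not part of the original source):
  //   ByteBuffer buf = ByteBuffer.allocate(8192);
  //   int n = stream.read(buf);    // bytes land at buf.position()
  //   while (n >= 0 && buf.hasRemaining()) {
  //     n = stream.read(buf);      // keep filling until EOF or buf is full
  //   }
  //   buf.flip();                  // switch to draining what was read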
}
| 2,372 | 39.220339 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.NoSuchElementException;
import java.util.StringTokenizer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import com.google.common.annotations.VisibleForTesting;
/** Filesystem disk space usage statistics.
* Uses the unix 'df' program to get mount points, and java.io.File for
* space utilization. Tested on Linux, FreeBSD, Windows. */
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class DF extends Shell {
/** Default DF refresh interval. */
public static final long DF_INTERVAL_DEFAULT = 3 * 1000;
private final String dirPath;
private final File dirFile;
private String filesystem;
private String mount;
private ArrayList<String> output;
public DF(File path, Configuration conf) throws IOException {
this(path, conf.getLong(CommonConfigurationKeys.FS_DF_INTERVAL_KEY, DF.DF_INTERVAL_DEFAULT));
}
public DF(File path, long dfInterval) throws IOException {
super(dfInterval);
this.dirPath = path.getCanonicalPath();
this.dirFile = new File(this.dirPath);
this.output = new ArrayList<String>();
}
/// ACCESSORS
/** @return the canonical path to the volume we're checking. */
public String getDirPath() {
return dirPath;
}
/** @return a string indicating which filesystem volume we're checking. */
public String getFilesystem() throws IOException {
if (Shell.WINDOWS) {
this.filesystem = dirFile.getCanonicalPath().substring(0, 2);
return this.filesystem;
} else {
run();
verifyExitCode();
parseOutput();
return filesystem;
}
}
/** @return the capacity of the measured filesystem in bytes. */
public long getCapacity() {
return dirFile.getTotalSpace();
}
/** @return the total used space on the filesystem in bytes. */
public long getUsed() {
return dirFile.getTotalSpace() - dirFile.getFreeSpace();
}
/** @return the usable space remaining on the filesystem in bytes. */
public long getAvailable() {
return dirFile.getUsableSpace();
}
  /** @return the percentage of the volume that is in use. */
public int getPercentUsed() {
double cap = (double) getCapacity();
double used = (cap - (double) getAvailable());
return (int) (used * 100.0 / cap);
}
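  // Worked example (illustrative): capacity 1000 bytes with 250 available
  // yields (1000 - 250) * 100.0 / 1000 = 75, i.e. 75% used.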
/** @return the filesystem mount point for the indicated volume */
public String getMount() throws IOException {
// Abort early if specified path does not exist
if (!dirFile.exists()) {
      throw new FileNotFoundException("Specified path " + dirFile.getPath()
          + " does not exist");
}
if (Shell.WINDOWS) {
// Assume a drive letter for a mount point
this.mount = dirFile.getCanonicalPath().substring(0, 2);
} else {
run();
verifyExitCode();
parseOutput();
}
return mount;
}
@Override
public String toString() {
return
"df -k " + mount +"\n" +
filesystem + "\t" +
getCapacity() / 1024 + "\t" +
getUsed() / 1024 + "\t" +
getAvailable() / 1024 + "\t" +
getPercentUsed() + "%\t" +
mount;
}
@Override
protected String[] getExecString() {
    // ignoring the error since the exit code is enough
if (Shell.WINDOWS){
throw new AssertionError(
"DF.getExecString() should never be called on Windows");
} else {
return new String[] {"bash","-c","exec 'df' '-k' '-P' '" + dirPath
+ "' 2>/dev/null"};
}
}
@Override
protected void parseExecResult(BufferedReader lines) throws IOException {
output.clear();
String line = lines.readLine();
while (line != null) {
output.add(line);
line = lines.readLine();
}
}
@VisibleForTesting
protected void parseOutput() throws IOException {
if (output.size() < 2) {
StringBuffer sb = new StringBuffer("Fewer lines of output than expected");
if (output.size() > 0) {
sb.append(": " + output.get(0));
}
throw new IOException(sb.toString());
}
String line = output.get(1);
StringTokenizer tokens =
new StringTokenizer(line, " \t\n\r\f%");
try {
this.filesystem = tokens.nextToken();
} catch (NoSuchElementException e) {
throw new IOException("Unexpected empty line");
}
if (!tokens.hasMoreTokens()) { // for long filesystem name
if (output.size() > 2) {
line = output.get(2);
} else {
throw new IOException("Expecting additional output after line: "
+ line);
}
tokens = new StringTokenizer(line, " \t\n\r\f%");
}
try {
Long.parseLong(tokens.nextToken()); // capacity
Long.parseLong(tokens.nextToken()); // used
Long.parseLong(tokens.nextToken()); // available
Integer.parseInt(tokens.nextToken()); // pct used
this.mount = tokens.nextToken();
} catch (NoSuchElementException e) {
throw new IOException("Could not parse line: " + line);
} catch (NumberFormatException e) {
throw new IOException("Could not parse line: " + line);
}
}
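  // Parsing sketch (illustrative sample, not part of the original source):
  // typical "df -k -P" output looks like
  //
  //   Filesystem     1024-blocks    Used Available Capacity Mounted on
  //   /dev/sda1         41153856 2064969  37023647       6% /
  //
  // parseOutput() reads filesystem "/dev/sda1" and mount "/" from the second
  // line, falling back to a third line when a long filesystem name pushes
  // the numeric columns onto their own row.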
private void verifyExitCode() throws IOException {
if (getExitCode() != 0) {
StringBuilder sb =
new StringBuilder("df could not be run successfully: ");
for (String line : output) {
sb.append(line);
}
throw new IOException(sb.toString());
}
}
public static void main(String[] args) throws Exception {
String path = ".";
if (args.length > 0)
path = args[0];
System.out.println(new DF(new File(path), DF_INTERVAL_DEFAULT).toString());
}
}
| 6,746 | 29.668182 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
/** This class is used to represent the capacity, free and used space on a
* {@link FileSystem}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FsStatus implements Writable {
private long capacity;
private long used;
private long remaining;
/** Construct a FsStatus object, using the specified statistics */
public FsStatus(long capacity, long used, long remaining) {
this.capacity = capacity;
this.used = used;
this.remaining = remaining;
}
/** Return the capacity in bytes of the file system */
public long getCapacity() {
return capacity;
}
/** Return the number of bytes used on the file system */
public long getUsed() {
return used;
}
/** Return the number of remaining bytes on the file system */
public long getRemaining() {
return remaining;
}
//////////////////////////////////////////////////
// Writable
//////////////////////////////////////////////////
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(capacity);
out.writeLong(used);
out.writeLong(remaining);
}
@Override
public void readFields(DataInput in) throws IOException {
capacity = in.readLong();
used = in.readLong();
remaining = in.readLong();
}
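  // Serialization sketch (illustrative, not part of the original source):
  //   FsStatus status = new FsStatus(1000L, 250L, 750L);
  //   DataOutputBuffer out = new DataOutputBuffer();
  //   status.write(out);           // writes capacity, used, remaining
  //   DataInputBuffer in = new DataInputBuffer();
  //   in.reset(out.getData(), out.getLength());
  //   status.readFields(in);       // restores the same three longs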
}
| 2,359 | 29.649351 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnsupportedFileSystemException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* File system for a given file system name/scheme is not supported
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class UnsupportedFileSystemException extends IOException {
private static final long serialVersionUID = 1L;
/**
* Constructs exception with the specified detail message.
* @param message exception message.
*/
public UnsupportedFileSystemException(final String message) {
super(message);
}
}
| 1,437 | 34.073171 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
/**
* Standard strings to use in exception messages in filesystems
* HDFS is used as the reference source of the strings
*/
public class FSExceptionMessages {
/**
* The operation failed because the stream is closed: {@value}
*/
public static final String STREAM_IS_CLOSED = "Stream is closed!";
/**
   * Negative offset seek forbidden: {@value}
*/
public static final String NEGATIVE_SEEK =
"Cannot seek to a negative offset";
/**
   * Attempting to seek or read past the end of the file: {@value}
*/
public static final String CANNOT_SEEK_PAST_EOF =
"Attempted to seek or read past the end of the file";
}
| 1,441 | 31.772727 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.regex.Pattern;
import org.apache.avro.reflect.Stringable;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/** Names a file or directory in a {@link FileSystem}.
* Path strings use slash as the directory separator.
*/
@Stringable
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Path implements Comparable {
/** The directory separator, a slash. */
public static final String SEPARATOR = "/";
public static final char SEPARATOR_CHAR = '/';
public static final String CUR_DIR = ".";
public static final boolean WINDOWS
= System.getProperty("os.name").startsWith("Windows");
/**
* Pre-compiled regular expressions to detect path formats.
*/
private static final Pattern hasUriScheme =
Pattern.compile("^[a-zA-Z][a-zA-Z0-9+-.]+:");
private static final Pattern hasDriveLetterSpecifier =
Pattern.compile("^/?[a-zA-Z]:");
private URI uri; // a hierarchical uri
/**
* Pathnames with scheme and relative path are illegal.
*/
void checkNotSchemeWithRelative() {
if (toUri().isAbsolute() && !isUriPathAbsolute()) {
throw new HadoopIllegalArgumentException(
"Unsupported name: has scheme but relative path-part");
}
}
void checkNotRelative() {
if (!isAbsolute() && toUri().getScheme() == null) {
throw new HadoopIllegalArgumentException("Path is relative");
}
}
public static Path getPathWithoutSchemeAndAuthority(Path path) {
// This code depends on Path.toString() to remove the leading slash before
// the drive specification on Windows.
Path newPath = path.isUriPathAbsolute() ?
new Path(null, null, path.toUri().getPath()) :
path;
return newPath;
}
/** Resolve a child path against a parent path. */
public Path(String parent, String child) {
this(new Path(parent), new Path(child));
}
/** Resolve a child path against a parent path. */
public Path(Path parent, String child) {
this(parent, new Path(child));
}
/** Resolve a child path against a parent path. */
public Path(String parent, Path child) {
this(new Path(parent), child);
}
/** Resolve a child path against a parent path. */
public Path(Path parent, Path child) {
// Add a slash to parent's path so resolution is compatible with URI's
URI parentUri = parent.uri;
String parentPath = parentUri.getPath();
if (!(parentPath.equals("/") || parentPath.isEmpty())) {
try {
parentUri = new URI(parentUri.getScheme(), parentUri.getAuthority(),
parentUri.getPath()+"/", null, parentUri.getFragment());
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
}
URI resolved = parentUri.resolve(child.uri);
initialize(resolved.getScheme(), resolved.getAuthority(),
resolved.getPath(), resolved.getFragment());
}
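  // Illustrative sketch, not part of the original source: the trailing
  // slash added above makes resolution follow URI semantics, e.g.
  //   new Path(new Path("hdfs://nn:8020/user"), "data")
  //   // -> "hdfs://nn:8020/user/data" (without the added slash,
  //   // URI.resolve() would yield "hdfs://nn:8020/data")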
private void checkPathArg( String path ) throws IllegalArgumentException {
// disallow construction of a Path from an empty string
if ( path == null ) {
throw new IllegalArgumentException(
"Can not create a Path from a null string");
}
if( path.length() == 0 ) {
throw new IllegalArgumentException(
"Can not create a Path from an empty string");
}
}
/** Construct a path from a String. Path strings are URIs, but with
* unescaped elements and some additional normalization. */
public Path(String pathString) throws IllegalArgumentException {
checkPathArg( pathString );
// We can't use 'new URI(String)' directly, since it assumes things are
// escaped, which we don't require of Paths.
// add a slash in front of paths with Windows drive letters
if (hasWindowsDrive(pathString) && pathString.charAt(0) != '/') {
pathString = "/" + pathString;
}
// parse uri components
String scheme = null;
String authority = null;
int start = 0;
// parse uri scheme, if any
int colon = pathString.indexOf(':');
int slash = pathString.indexOf('/');
if ((colon != -1) &&
((slash == -1) || (colon < slash))) { // has a scheme
scheme = pathString.substring(0, colon);
start = colon+1;
}
// parse uri authority, if any
if (pathString.startsWith("//", start) &&
(pathString.length()-start > 2)) { // has authority
int nextSlash = pathString.indexOf('/', start+2);
int authEnd = nextSlash > 0 ? nextSlash : pathString.length();
authority = pathString.substring(start+2, authEnd);
start = authEnd;
}
// uri path is the rest of the string -- query & fragment not supported
String path = pathString.substring(start, pathString.length());
initialize(scheme, authority, path, null);
}
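  // Illustrative sketch, not part of the original source: how the ad-hoc
  // parse above splits a path string:
  //   "hdfs://host:9000/a/b" -> scheme "hdfs", authority "host:9000", path "/a/b"
  //   "/a/b"                 -> scheme null, authority null, path "/a/b"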
/**
* Construct a path from a URI
*/
public Path(URI aUri) {
uri = aUri.normalize();
}
/** Construct a Path from components. */
public Path(String scheme, String authority, String path) {
checkPathArg( path );
// add a slash in front of paths with Windows drive letters
if (hasWindowsDrive(path) && path.charAt(0) != '/') {
path = "/" + path;
}
// add "./" in front of Linux relative paths so that a path containing
    // a colon, e.g. "a:b", will not be interpreted as scheme "a".
if (!WINDOWS && path.charAt(0) != '/') {
path = "./" + path;
}
initialize(scheme, authority, path, null);
}
private void initialize(String scheme, String authority, String path,
String fragment) {
try {
this.uri = new URI(scheme, authority, normalizePath(scheme, path), null, fragment)
.normalize();
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
}
/**
 * Merge two paths such that the second path is appended relative to the first.
* The returned path has the scheme and authority of the first path. On
* Windows, the drive specification in the second path is discarded.
*
* @param path1 Path first path
* @param path2 Path second path, to be appended relative to path1
* @return Path merged path
*/
public static Path mergePaths(Path path1, Path path2) {
String path2Str = path2.toUri().getPath();
path2Str = path2Str.substring(startPositionWithoutWindowsDrive(path2Str));
// Add path components explicitly, because simply concatenating two path
// string is not safe, for example:
// "/" + "/foo" yields "//foo", which will be parsed as authority in Path
return new Path(path1.toUri().getScheme(),
path1.toUri().getAuthority(),
path1.toUri().getPath() + path2Str);
}
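  // Illustrative sketch, not part of the original source:
  //   Path.mergePaths(new Path("hdfs://nn:8020/a"), new Path("/b/c"))
  //   // -> hdfs://nn:8020/a/b/c; on Windows, a drive specifier in path2,
  //   // e.g. "/C:/b", would be stripped by startPositionWithoutWindowsDrive()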
/**
* Normalize a path string to use non-duplicated forward slashes as
* the path separator and remove any trailing path separators.
* @param scheme Supplies the URI scheme. Used to deduce whether we
* should replace backslashes or not.
* @param path Supplies the scheme-specific part
* @return Normalized path string.
*/
private static String normalizePath(String scheme, String path) {
// Remove double forward slashes.
path = StringUtils.replace(path, "//", "/");
// Remove backslashes if this looks like a Windows path. Avoid
// the substitution if it looks like a non-local URI.
if (WINDOWS &&
(hasWindowsDrive(path) ||
(scheme == null) ||
(scheme.isEmpty()) ||
(scheme.equals("file")))) {
path = StringUtils.replace(path, "\\", "/");
}
// trim trailing slash from non-root path (ignoring windows drive)
int minLength = startPositionWithoutWindowsDrive(path) + 1;
if (path.length() > minLength && path.endsWith(SEPARATOR)) {
path = path.substring(0, path.length()-1);
}
return path;
}
private static boolean hasWindowsDrive(String path) {
return (WINDOWS && hasDriveLetterSpecifier.matcher(path).find());
}
private static int startPositionWithoutWindowsDrive(String path) {
if (hasWindowsDrive(path)) {
return path.charAt(0) == SEPARATOR_CHAR ? 3 : 2;
} else {
return 0;
}
}
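  // Illustrative sketch, not part of the original source (values assume a
  // Windows platform, where hasWindowsDrive() can return true):
  //   startPositionWithoutWindowsDrive("/C:/a") == 3  // slash + drive spec
  //   startPositionWithoutWindowsDrive("C:/a")  == 2  // bare drive spec
  //   startPositionWithoutWindowsDrive("/a/b")  == 0  // no drive present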
/**
* Determine whether a given path string represents an absolute path on
* Windows. e.g. "C:/a/b" is an absolute path. "C:a/b" is not.
*
* @param pathString Supplies the path string to evaluate.
* @param slashed true if the given path is prefixed with "/".
* @return true if the supplied path looks like an absolute path with a Windows
* drive-specifier.
*/
public static boolean isWindowsAbsolutePath(final String pathString,
final boolean slashed) {
int start = startPositionWithoutWindowsDrive(pathString);
return start > 0
&& pathString.length() > start
&& ((pathString.charAt(start) == SEPARATOR_CHAR) ||
(pathString.charAt(start) == '\\'));
}
/** Convert this to a URI. */
public URI toUri() { return uri; }
/** Return the FileSystem that owns this Path. */
public FileSystem getFileSystem(Configuration conf) throws IOException {
return FileSystem.get(this.toUri(), conf);
}
/**
   * Is an absolute path (i.e. a slash-relative path part)
   * AND the scheme is null AND the authority is null.
*/
public boolean isAbsoluteAndSchemeAuthorityNull() {
return (isUriPathAbsolute() &&
uri.getScheme() == null && uri.getAuthority() == null);
}
/**
* True if the path component (i.e. directory) of this URI is absolute.
*/
public boolean isUriPathAbsolute() {
int start = startPositionWithoutWindowsDrive(uri.getPath());
return uri.getPath().startsWith(SEPARATOR, start);
}
/** True if the path is not a relative path and starts with root. */
public boolean isAbsolute() {
return isUriPathAbsolute();
}
/**
* @return true if and only if this path represents the root of a file system
*/
public boolean isRoot() {
return getParent() == null;
}
/** Returns the final component of this path.*/
public String getName() {
String path = uri.getPath();
int slash = path.lastIndexOf(SEPARATOR);
return path.substring(slash+1);
}
/** Returns the parent of a path or null if at root. */
public Path getParent() {
String path = uri.getPath();
int lastSlash = path.lastIndexOf('/');
int start = startPositionWithoutWindowsDrive(path);
if ((path.length() == start) || // empty path
(lastSlash == start && path.length() == start+1)) { // at root
return null;
}
String parent;
if (lastSlash==-1) {
parent = CUR_DIR;
} else {
parent = path.substring(0, lastSlash==start?start+1:lastSlash);
}
return new Path(uri.getScheme(), uri.getAuthority(), parent);
}
/** Adds a suffix to the final name in the path.*/
public Path suffix(String suffix) {
return new Path(getParent(), getName()+suffix);
}
@Override
public String toString() {
// we can't use uri.toString(), which escapes everything, because we want
// illegal characters unescaped in the string, for glob processing, etc.
StringBuilder buffer = new StringBuilder();
if (uri.getScheme() != null) {
buffer.append(uri.getScheme());
buffer.append(":");
}
if (uri.getAuthority() != null) {
buffer.append("//");
buffer.append(uri.getAuthority());
}
if (uri.getPath() != null) {
String path = uri.getPath();
if (path.indexOf('/')==0 &&
hasWindowsDrive(path) && // has windows drive
uri.getScheme() == null && // but no scheme
uri.getAuthority() == null) // or authority
path = path.substring(1); // remove slash before drive
buffer.append(path);
}
if (uri.getFragment() != null) {
buffer.append("#");
buffer.append(uri.getFragment());
}
return buffer.toString();
}
@Override
public boolean equals(Object o) {
if (!(o instanceof Path)) {
return false;
}
Path that = (Path)o;
return this.uri.equals(that.uri);
}
@Override
public int hashCode() {
return uri.hashCode();
}
@Override
public int compareTo(Object o) {
Path that = (Path)o;
return this.uri.compareTo(that.uri);
}
/** Return the number of elements in this path. */
public int depth() {
String path = uri.getPath();
int depth = 0;
int slash = path.length()==1 && path.charAt(0)=='/' ? -1 : 0;
while (slash != -1) {
depth++;
slash = path.indexOf(SEPARATOR, slash+1);
}
return depth;
}
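  // Illustrative sketch, not part of the original source:
  //   new Path("/").depth()      == 0
  //   new Path("/a").depth()     == 1
  //   new Path("/a/b/c").depth() == 3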
/**
* Returns a qualified path object.
*
* Deprecated - use {@link #makeQualified(URI, Path)}
*/
@Deprecated
public Path makeQualified(FileSystem fs) {
return makeQualified(fs.getUri(), fs.getWorkingDirectory());
}
/** Returns a qualified path object. */
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
public Path makeQualified(URI defaultUri, Path workingDir ) {
Path path = this;
if (!isAbsolute()) {
path = new Path(workingDir, this);
}
URI pathUri = path.toUri();
String scheme = pathUri.getScheme();
String authority = pathUri.getAuthority();
String fragment = pathUri.getFragment();
if (scheme != null &&
(authority != null || defaultUri.getAuthority() == null))
return path;
if (scheme == null) {
scheme = defaultUri.getScheme();
}
if (authority == null) {
authority = defaultUri.getAuthority();
if (authority == null) {
authority = "";
}
}
URI newUri = null;
try {
newUri = new URI(scheme, authority ,
normalizePath(scheme, pathUri.getPath()), null, fragment);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
return new Path(newUri);
}
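  // Illustrative sketch, not part of the original source (assumed values):
  //   new Path("data").makeQualified(URI.create("hdfs://nn:8020"),
  //                                  new Path("/user/alice"))
  //   // -> hdfs://nn:8020/user/alice/data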
}
| 15,074 | 31.349785 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DirectoryListingStartAfterNotFoundException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown when the startAfter can't be found when listing a directory.
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Stable
public class DirectoryListingStartAfterNotFoundException extends IOException {
private static final long serialVersionUID = 1L;
public DirectoryListingStartAfterNotFoundException() {
super();
}
public DirectoryListingStartAfterNotFoundException(String msg) {
super(msg);
}
}
| 1,434 | 33.166667 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URISyntaxException;
import java.nio.channels.ClosedChannelException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
/**
 * Abstract Checksummed Fs.
 * It provides a basic implementation of a checksummed Fs,
* which creates a checksum file for each raw file.
* It generates & verifies checksums at the client side.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */
public abstract class ChecksumFs extends FilterFs {
private static final byte[] CHECKSUM_VERSION = new byte[] {'c', 'r', 'c', 0};
private int defaultBytesPerChecksum = 512;
private boolean verifyChecksum = true;
public static double getApproxChkSumLength(long size) {
return ChecksumFSOutputSummer.CHKSUM_AS_FRACTION * size;
}
public ChecksumFs(AbstractFileSystem theFs)
throws IOException, URISyntaxException {
super(theFs);
defaultBytesPerChecksum =
getMyFs().getServerDefaults().getBytesPerChecksum();
}
/**
* Set whether to verify checksum.
*/
@Override
public void setVerifyChecksum(boolean inVerifyChecksum) {
this.verifyChecksum = inVerifyChecksum;
}
/** get the raw file system. */
public AbstractFileSystem getRawFs() {
return getMyFs();
}
/** Return the name of the checksum file associated with a file.*/
public Path getChecksumFile(Path file) {
return new Path(file.getParent(), "." + file.getName() + ".crc");
}
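  // Illustrative sketch, not part of the original source:
  //   getChecksumFile(new Path("/data/part-0")) -> /data/.part-0.crc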
/** Return true iff file is a checksum file name.*/
public static boolean isChecksumFile(Path file) {
String name = file.getName();
return name.startsWith(".") && name.endsWith(".crc");
}
/** Return the length of the checksum file given the size of the
* actual file.
**/
public long getChecksumFileLength(Path file, long fileSize) {
return getChecksumLength(fileSize, getBytesPerSum());
}
/** Return the bytes Per Checksum. */
public int getBytesPerSum() {
return defaultBytesPerChecksum;
}
private int getSumBufferSize(int bytesPerSum, int bufferSize)
throws IOException {
int defaultBufferSize = getMyFs().getServerDefaults().getFileBufferSize();
int proportionalBufferSize = bufferSize / bytesPerSum;
return Math.max(bytesPerSum,
Math.max(proportionalBufferSize, defaultBufferSize));
}
/*******************************************************
* For open()'s FSInputStream
* It verifies that data matches checksums.
*******************************************************/
private static class ChecksumFSInputChecker extends FSInputChecker {
public static final Log LOG
= LogFactory.getLog(FSInputChecker.class);
private static final int HEADER_LENGTH = 8;
private ChecksumFs fs;
private FSDataInputStream datas;
private FSDataInputStream sums;
private int bytesPerSum = 1;
private long fileLen = -1L;
public ChecksumFSInputChecker(ChecksumFs fs, Path file)
throws IOException, UnresolvedLinkException {
this(fs, file, fs.getServerDefaults().getFileBufferSize());
}
public ChecksumFSInputChecker(ChecksumFs fs, Path file, int bufferSize)
throws IOException, UnresolvedLinkException {
super(file, fs.getFileStatus(file).getReplication());
this.datas = fs.getRawFs().open(file, bufferSize);
this.fs = fs;
Path sumFile = fs.getChecksumFile(file);
try {
int sumBufferSize = fs.getSumBufferSize(fs.getBytesPerSum(),
bufferSize);
sums = fs.getRawFs().open(sumFile, sumBufferSize);
byte[] version = new byte[CHECKSUM_VERSION.length];
sums.readFully(version);
if (!Arrays.equals(version, CHECKSUM_VERSION)) {
throw new IOException("Not a checksum file: "+sumFile);
}
this.bytesPerSum = sums.readInt();
set(fs.verifyChecksum, DataChecksum.newCrc32(), bytesPerSum, 4);
} catch (FileNotFoundException e) { // quietly ignore
set(fs.verifyChecksum, null, 1, 0);
} catch (IOException e) { // loudly ignore
LOG.warn("Problem opening checksum file: "+ file +
". Ignoring exception: " , e);
set(fs.verifyChecksum, null, 1, 0);
}
}
private long getChecksumFilePos(long dataPos) {
return HEADER_LENGTH + 4*(dataPos/bytesPerSum);
}
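    // Illustrative sketch, not part of the original source: with
    // bytesPerSum == 512, data byte 1000 lies in chunk 1, and its CRC
    // starts at HEADER_LENGTH + 4*1 == 12 in the checksum file.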
@Override
protected long getChunkPosition(long dataPos) {
return dataPos/bytesPerSum*bytesPerSum;
}
@Override
public int available() throws IOException {
return datas.available() + super.available();
}
@Override
public int read(long position, byte[] b, int off, int len)
throws IOException, UnresolvedLinkException {
// parameter check
if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return 0;
}
if (position<0) {
throw new IllegalArgumentException(
"Parameter position can not to be negative");
}
ChecksumFSInputChecker checker = new ChecksumFSInputChecker(fs, file);
checker.seek(position);
int nread = checker.read(b, off, len);
checker.close();
return nread;
}
@Override
public void close() throws IOException {
datas.close();
if (sums != null) {
sums.close();
}
set(fs.verifyChecksum, null, 1, 0);
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
final long sumsPos = getChecksumFilePos(targetPos);
fs.reportChecksumFailure(file, datas, targetPos, sums, sumsPos);
final boolean newDataSource = datas.seekToNewSource(targetPos);
return sums.seekToNewSource(sumsPos) || newDataSource;
}
@Override
protected int readChunk(long pos, byte[] buf, int offset, int len,
byte[] checksum) throws IOException {
boolean eof = false;
if (needChecksum()) {
assert checksum != null; // we have a checksum buffer
assert checksum.length % CHECKSUM_SIZE == 0; // it is sane length
assert len >= bytesPerSum; // we must read at least one chunk
final int checksumsToRead = Math.min(
len/bytesPerSum, // number of checksums based on len to read
checksum.length / CHECKSUM_SIZE); // size of checksum buffer
long checksumPos = getChecksumFilePos(pos);
if(checksumPos != sums.getPos()) {
sums.seek(checksumPos);
}
int sumLenRead = sums.read(checksum, 0, CHECKSUM_SIZE * checksumsToRead);
if (sumLenRead >= 0 && sumLenRead % CHECKSUM_SIZE != 0) {
throw new EOFException("Checksum file not a length multiple of checksum size " +
"in " + file + " at " + pos + " checksumpos: " + checksumPos +
" sumLenread: " + sumLenRead );
}
if (sumLenRead <= 0) { // we're at the end of the file
eof = true;
} else {
// Adjust amount of data to read based on how many checksum chunks we read
len = Math.min(len, bytesPerSum * (sumLenRead / CHECKSUM_SIZE));
}
}
if (pos != datas.getPos()) {
datas.seek(pos);
}
int nread = readFully(datas, buf, offset, len);
if (eof && nread > 0) {
throw new ChecksumException("Checksum error: "+file+" at "+pos, pos);
}
return nread;
}
/* Return the file length */
private long getFileLength() throws IOException, UnresolvedLinkException {
if (fileLen==-1L) {
fileLen = fs.getFileStatus(file).getLen();
}
return fileLen;
}
/**
* Skips over and discards <code>n</code> bytes of data from the
* input stream.
*
     * The <code>skip</code> method may skip over a smaller number of bytes
     * if end of file is reached before <code>n</code> bytes have been skipped.
* The actual number of bytes skipped is returned. If <code>n</code> is
* negative, no bytes are skipped.
*
* @param n the number of bytes to be skipped.
* @return the actual number of bytes skipped.
* @exception IOException if an I/O error occurs.
* ChecksumException if the chunk to skip to is corrupted
*/
@Override
public synchronized long skip(long n) throws IOException {
final long curPos = getPos();
final long fileLength = getFileLength();
if (n+curPos > fileLength) {
n = fileLength - curPos;
}
return super.skip(n);
}
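    // Illustrative sketch, not part of the original source: for a 100-byte
    // file with the stream at position 90, skip(50) is clamped to 10 so the
    // position never passes EOF.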
/**
* Seek to the given position in the stream.
* The next read() will be from that position.
*
     * <p>This method does not allow seeking past the end of the file.
     * Attempting to do so produces an IOException.
     *
     * @param pos the position to seek to.
* @exception IOException if an I/O error occurs or seeks after EOF
* ChecksumException if the chunk to seek to is corrupted
*/
@Override
public synchronized void seek(long pos) throws IOException {
if (pos>getFileLength()) {
throw new IOException("Cannot seek after EOF");
}
super.seek(pos);
}
}
@Override
public boolean truncate(Path f, long newLength) throws IOException {
throw new IOException("Not supported");
}
/**
* Opens an FSDataInputStream at the indicated Path.
* @param f the file name to open
* @param bufferSize the size of the buffer to be used.
*/
@Override
public FSDataInputStream open(Path f, int bufferSize)
throws IOException, UnresolvedLinkException {
return new FSDataInputStream(
new ChecksumFSInputChecker(this, f, bufferSize));
}
/**
* Calculated the length of the checksum file in bytes.
* @param size the length of the data file in bytes
* @param bytesPerSum the number of bytes in a checksum block
* @return the number of bytes in the checksum file
*/
public static long getChecksumLength(long size, int bytesPerSum) {
//the checksum length is equal to size passed divided by bytesPerSum +
//bytes written in the beginning of the checksum file.
return ((size + bytesPerSum - 1) / bytesPerSum) * 4 +
CHECKSUM_VERSION.length + 4;
}
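  // Illustrative sketch, not part of the original source, using the default
  // bytesPerSum of 512 (header = 4 version bytes + 4 length bytes):
  //   getChecksumLength(1024, 512) == 2*4 + 8 == 16  // two CRC32 chunks
  //   getChecksumLength(1025, 512) == 3*4 + 8 == 20  // one extra chunk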
/** This class provides an output stream for a checksummed file.
* It generates checksums for data. */
private static class ChecksumFSOutputSummer extends FSOutputSummer {
private FSDataOutputStream datas;
private FSDataOutputStream sums;
private static final float CHKSUM_AS_FRACTION = 0.01f;
private boolean isClosed = false;
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file,
final EnumSet<CreateFlag> createFlag,
final FsPermission absolutePermission, final int bufferSize,
final short replication, final long blockSize,
final Progressable progress, final ChecksumOpt checksumOpt,
final boolean createParent) throws IOException {
super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
fs.getBytesPerSum()));
      // checksumOpt is passed down to the raw fs. Unless the raw fs
      // implements checksumming internally, checksumOpt will be ignored.
      // If the raw fs does checksum internally, we will end up with
      // two layers of checksumming, i.e. checksumming the checksum file.
this.datas = fs.getRawFs().createInternal(file, createFlag,
absolutePermission, bufferSize, replication, blockSize, progress,
checksumOpt, createParent);
      // Now create the checksum file; adjust the buffer size
int bytesPerSum = fs.getBytesPerSum();
int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
absolutePermission, sumBufferSize, replication, blockSize, progress,
checksumOpt, createParent);
sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
sums.writeInt(bytesPerSum);
}
@Override
public void close() throws IOException {
try {
flushBuffer();
sums.close();
datas.close();
} finally {
isClosed = true;
}
}
@Override
protected void writeChunk(byte[] b, int offset, int len, byte[] checksum,
int ckoff, int cklen)
throws IOException {
datas.write(b, offset, len);
sums.write(checksum, ckoff, cklen);
}
@Override
protected void checkClosed() throws IOException {
if (isClosed) {
throw new ClosedChannelException();
}
}
}
@Override
public FSDataOutputStream createInternal(Path f,
EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
int bufferSize, short replication, long blockSize, Progressable progress,
ChecksumOpt checksumOpt, boolean createParent) throws IOException {
final FSDataOutputStream out = new FSDataOutputStream(
new ChecksumFSOutputSummer(this, f, createFlag, absolutePermission,
bufferSize, replication, blockSize, progress,
checksumOpt, createParent), null);
return out;
}
/** Check if exists.
* @param f source file
*/
private boolean exists(Path f)
throws IOException, UnresolvedLinkException {
try {
return getMyFs().getFileStatus(f) != null;
} catch (FileNotFoundException e) {
return false;
}
}
/** True iff the named path is a directory.
* Note: Avoid using this method. Instead reuse the FileStatus
* returned by getFileStatus() or listStatus() methods.
*/
private boolean isDirectory(Path f)
throws IOException, UnresolvedLinkException {
try {
return getMyFs().getFileStatus(f).isDirectory();
} catch (FileNotFoundException e) {
return false; // f does not exist
}
}
/**
* Set replication for an existing file.
* Implement the abstract <tt>setReplication</tt> of <tt>FileSystem</tt>
* @param src file name
* @param replication new replication
* @throws IOException
* @return true if successful;
* false if file does not exist or is a directory
*/
@Override
public boolean setReplication(Path src, short replication)
throws IOException, UnresolvedLinkException {
boolean value = getMyFs().setReplication(src, replication);
if (!value) {
return false;
}
Path checkFile = getChecksumFile(src);
if (exists(checkFile)) {
getMyFs().setReplication(checkFile, replication);
}
return true;
}
/**
* Rename files/dirs.
*/
@Override
public void renameInternal(Path src, Path dst)
throws IOException, UnresolvedLinkException {
if (isDirectory(src)) {
getMyFs().rename(src, dst);
} else {
getMyFs().rename(src, dst);
Path checkFile = getChecksumFile(src);
if (exists(checkFile)) { //try to rename checksum
if (isDirectory(dst)) {
getMyFs().rename(checkFile, dst);
} else {
getMyFs().rename(checkFile, getChecksumFile(dst));
}
}
}
}
/**
* Implement the delete(Path, boolean) in checksum
* file system.
*/
@Override
public boolean delete(Path f, boolean recursive)
throws IOException, UnresolvedLinkException {
FileStatus fstatus = null;
try {
fstatus = getMyFs().getFileStatus(f);
} catch(FileNotFoundException e) {
return false;
}
if (fstatus.isDirectory()) {
//this works since the crcs are in the same
      //directories as the files, so we just delete
//everything in the underlying filesystem
return getMyFs().delete(f, recursive);
} else {
Path checkFile = getChecksumFile(f);
if (exists(checkFile)) {
getMyFs().delete(checkFile, true);
}
return getMyFs().delete(f, true);
}
}
/**
* Report a checksum error to the file system.
* @param f the file name containing the error
* @param in the stream open on the file
* @param inPos the position of the beginning of the bad data in the file
* @param sums the stream open on the checksum file
* @param sumsPos the position of the beginning of the bad data in the
* checksum file
   * @return true if retry is necessary
*/
public boolean reportChecksumFailure(Path f, FSDataInputStream in,
long inPos, FSDataInputStream sums, long sumsPos) {
return false;
}
@Override
public FileStatus[] listStatus(Path f) throws IOException,
UnresolvedLinkException {
ArrayList<FileStatus> results = new ArrayList<FileStatus>();
FileStatus[] listing = getMyFs().listStatus(f);
if (listing != null) {
for (int i = 0; i < listing.length; i++) {
if (!isChecksumFile(listing[i].getPath())) {
results.add(listing[i]);
}
}
}
return results.toArray(new FileStatus[results.size()]);
}
}
| 18,484 | 33.746241 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.BufferedInputStream;
import java.io.EOFException;
import java.io.FileDescriptor;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * A class that optimizes reading from FSInputStream by buffering
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class BufferedFSInputStream extends BufferedInputStream
implements Seekable, PositionedReadable, HasFileDescriptor {
/**
* Creates a <code>BufferedFSInputStream</code>
* with the specified buffer size,
* and saves its argument, the input stream
* <code>in</code>, for later use. An internal
* buffer array of length <code>size</code>
* is created and stored in <code>buf</code>.
*
* @param in the underlying input stream.
* @param size the buffer size.
* @exception IllegalArgumentException if size <= 0.
*/
public BufferedFSInputStream(FSInputStream in, int size) {
super(in, size);
}
@Override
public long getPos() throws IOException {
if (in == null) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
}
return ((FSInputStream)in).getPos()-(count-pos);
}
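  // Illustrative sketch, not part of the original source: if the wrapped
  // stream has read ahead to byte 4096 (count == 4096) and the caller has
  // consumed 100 buffered bytes (pos == 100), getPos() reports
  // 4096 - (4096 - 100) == 100.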
@Override
public long skip(long n) throws IOException {
if (n <= 0) {
return 0;
}
seek(getPos()+n);
return n;
}
@Override
public void seek(long pos) throws IOException {
if (in == null) {
throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
}
if (pos < 0) {
throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
}
if (this.pos != this.count) {
// optimize: check if the pos is in the buffer
// This optimization only works if pos != count -- if they are
// equal, it's possible that the previous reads were just
// longer than the total buffer size, and hence skipped the buffer.
long end = ((FSInputStream)in).getPos();
long start = end - count;
if( pos>=start && pos<end) {
this.pos = (int)(pos-start);
return;
}
}
// invalidate buffer
this.pos = 0;
this.count = 0;
((FSInputStream)in).seek(pos);
}
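  // Illustrative sketch, not part of the original source (assumed
  // positions): with the underlying stream at 4096 and count == 4096, the
  // buffer covers [0, 4096), so seek(100) only moves this.pos; seek(5000)
  // falls outside the buffer and triggers an underlying seek.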
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
pos = 0;
count = 0;
return ((FSInputStream)in).seekToNewSource(targetPos);
}
@Override
public int read(long position, byte[] buffer, int offset, int length) throws IOException {
return ((FSInputStream)in).read(position, buffer, offset, length) ;
}
@Override
public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
((FSInputStream)in).readFully(position, buffer, offset, length);
}
@Override
public void readFully(long position, byte[] buffer) throws IOException {
((FSInputStream)in).readFully(position, buffer);
}
@Override
public FileDescriptor getFileDescriptor() throws IOException {
if (in instanceof HasFileDescriptor) {
return ((HasFileDescriptor) in).getFileDescriptor();
} else {
return null;
}
}
}
| 3,948 | 29.376923 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Syncable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** This is the interface for flush/sync operations. */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface Syncable {
/**
* @deprecated As of HADOOP 0.21.0, replaced by hflush
* @see #hflush()
*/
@Deprecated public void sync() throws IOException;
/** Flush out the data in client's user buffer. After the return of
* this call, new readers will see the data.
* @throws IOException if any error occurs
*/
public void hflush() throws IOException;
/** Similar to posix fsync, flush out the data in client's user buffer
* all the way to the disk device (but the disk may have it in its cache).
* @throws IOException if error occurs
*/
public void hsync() throws IOException;
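  // Illustrative usage sketch, not part of the original source:
  //   out.write(record);
  //   out.hflush(); // visible to new readers; may still sit in OS caches
  //   out.hsync();  // flushed to the device, like POSIX fsync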
}
| 1,721 | 34.875 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.util.regex.PatternSyntaxException;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A filter for POSIX glob pattern with brace expansions.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class GlobFilter implements PathFilter {
private final static PathFilter DEFAULT_FILTER = new PathFilter() {
@Override
public boolean accept(Path file) {
return true;
}
};
private PathFilter userFilter = DEFAULT_FILTER;
private GlobPattern pattern;
/**
* Creates a glob filter with the specified file pattern.
*
* @param filePattern the file pattern.
* @throws IOException thrown if the file pattern is incorrect.
*/
public GlobFilter(String filePattern) throws IOException {
init(filePattern, DEFAULT_FILTER);
}
/**
   * Creates a glob filter with the specified file pattern and a user filter.
*
* @param filePattern the file pattern.
* @param filter user filter in addition to the glob pattern.
* @throws IOException thrown if the file pattern is incorrect.
*/
public GlobFilter(String filePattern, PathFilter filter) throws IOException {
init(filePattern, filter);
}
void init(String filePattern, PathFilter filter) throws IOException {
try {
userFilter = filter;
pattern = new GlobPattern(filePattern);
}
catch (PatternSyntaxException e) {
      // Existing code expects the IOException message to start with "Illegal file pattern"
throw new IOException("Illegal file pattern: "+ e.getMessage(), e);
}
}
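  // Illustrative usage sketch, not part of the original source:
  //   PathFilter f = new GlobFilter("*.log");
  //   f.accept(new Path("/var/app/run-1.log")); // true: glob matches the name
  //   f.accept(new Path("/var/app/run-1.tmp")); // false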
boolean hasPattern() {
return pattern.hasWildcard();
}
@Override
public boolean accept(Path path) {
return pattern.matches(path.getName()) && userFilter.accept(path);
}
}
| 2,676 | 30.869048 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathIsNotDirectoryException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
/** ENOTDIR */
public class PathIsNotDirectoryException extends PathExistsException {
static final long serialVersionUID = 0L;
/** @param path for the exception */
public PathIsNotDirectoryException(String path) {
super(path, "Is not a directory");
}
}
| 1,102 | 39.851852 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileAlreadyExistsException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Thrown when the target file of an operation already exists and the
 * operation is not configured to overwrite it.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FileAlreadyExistsException
extends IOException {
public FileAlreadyExistsException() {
super();
}
public FileAlreadyExistsException(String msg) {
super(msg);
}
}
| 1,352 | 29.75 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
import com.google.common.annotations.VisibleForTesting;
/**
* This class provides an interface for implementors of a Hadoop file system
* (analogous to the VFS of Unix). Applications do not access this class;
* instead they access files across all file systems using {@link FileContext}.
*
 * Pathnames passed to AbstractFileSystem can be a fully qualified URI that
 * matches the "this" file system (i.e. same scheme and authority)
 * or a slash-relative name that is assumed to be relative
 * to the root of the "this" file system.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */
public abstract class AbstractFileSystem {
static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);
  /** Records statistics per file system class. */
private static final Map<URI, Statistics>
STATISTICS_TABLE = new HashMap<URI, Statistics>();
/** Cache of constructors for each file system class. */
private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE =
new ConcurrentHashMap<Class<?>, Constructor<?>>();
private static final Class<?>[] URI_CONFIG_ARGS =
new Class[]{URI.class, Configuration.class};
/** The statistics for this file system. */
protected Statistics statistics;
@VisibleForTesting
static final String NO_ABSTRACT_FS_ERROR = "No AbstractFileSystem configured for scheme";
private final URI myUri;
public Statistics getStatistics() {
return statistics;
}
/**
* Returns true if the specified string is considered valid in the path part
* of a URI by this file system. The default implementation enforces the rules
* of HDFS, but subclasses may override this method to implement specific
* validation rules for specific file systems.
*
* @param src String source filename to check, path part of the URI
* @return boolean true if the specified string is considered valid
*/
public boolean isValidName(String src) {
// Prohibit ".." "." and anything containing ":"
StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
while(tokens.hasMoreTokens()) {
String element = tokens.nextToken();
if (element.equals("..") ||
element.equals(".") ||
(element.indexOf(":") >= 0)) {
return false;
}
}
return true;
}
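  // Illustrative sketch, not part of the original source:
  //   isValidName("/user/alice/file") == true
  //   isValidName("/user/../etc")     == false // ".." component rejected
  //   isValidName("/a:b")             == false // ":" not allowed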
/**
* Create an object for the given class and initialize it from conf.
* @param theClass class of which an object is created
* @param conf Configuration
* @return a new object
*/
@SuppressWarnings("unchecked")
static <T> T newInstance(Class<T> theClass,
URI uri, Configuration conf) {
T result;
try {
Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
if (meth == null) {
meth = theClass.getDeclaredConstructor(URI_CONFIG_ARGS);
meth.setAccessible(true);
CONSTRUCTOR_CACHE.put(theClass, meth);
}
result = meth.newInstance(uri, conf);
} catch (Exception e) {
throw new RuntimeException(e);
}
return result;
}
/**
* Create a file system instance for the specified uri using the conf. The
* conf is used to find the class name that implements the file system. The
* conf is also passed to the file system for its configuration.
*
* @param uri URI of the file system
* @param conf Configuration for the file system
*
* @return Returns the file system for the given URI
*
* @throws UnsupportedFileSystemException file system for <code>uri</code> is
* not found
*/
public static AbstractFileSystem createFileSystem(URI uri, Configuration conf)
throws UnsupportedFileSystemException {
final String fsImplConf = String.format("fs.AbstractFileSystem.%s.impl",
uri.getScheme());
Class<?> clazz = conf.getClass(fsImplConf, null);
if (clazz == null) {
throw new UnsupportedFileSystemException(String.format(
"%s=null: %s: %s",
fsImplConf, NO_ABSTRACT_FS_ERROR, uri.getScheme()));
}
return (AbstractFileSystem) newInstance(clazz, uri, conf);
}
/**
* Get the statistics for a particular file system.
*
* @param uri
* used as key to lookup STATISTICS_TABLE. Only scheme and authority
* part of the uri are used.
* @return a statistics object
*/
protected static synchronized Statistics getStatistics(URI uri) {
String scheme = uri.getScheme();
if (scheme == null) {
throw new IllegalArgumentException("Scheme not defined in the uri: "
+ uri);
}
URI baseUri = getBaseUri(uri);
Statistics result = STATISTICS_TABLE.get(baseUri);
if (result == null) {
result = new Statistics(scheme);
STATISTICS_TABLE.put(baseUri, result);
}
return result;
}
private static URI getBaseUri(URI uri) {
String scheme = uri.getScheme();
String authority = uri.getAuthority();
String baseUriString = scheme + "://";
if (authority != null) {
baseUriString = baseUriString + authority;
} else {
baseUriString = baseUriString + "/";
}
return URI.create(baseUriString);
}
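  // Illustrative sketch, not part of the original source:
  //   getBaseUri(URI.create("hdfs://nn:8020/a/b")) -> hdfs://nn:8020
  //   getBaseUri(URI.create("file:///a/b"))        -> file:///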
public static synchronized void clearStatistics() {
for(Statistics stat: STATISTICS_TABLE.values()) {
stat.reset();
}
}
/**
* Prints statistics for all file systems.
*/
public static synchronized void printStatistics() {
for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
System.out.println(" FileSystem " + pair.getKey().getScheme() + "://"
+ pair.getKey().getAuthority() + ": " + pair.getValue());
}
}
protected static synchronized Map<URI, Statistics> getAllStatistics() {
Map<URI, Statistics> statsMap = new HashMap<URI, Statistics>(
STATISTICS_TABLE.size());
for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
URI key = pair.getKey();
Statistics value = pair.getValue();
Statistics newStatsObj = new Statistics(value);
statsMap.put(URI.create(key.toString()), newStatsObj);
}
return statsMap;
}
/**
* The main factory method for creating a file system. Get a file system for
* the URI's scheme and authority. The scheme of the <code>uri</code>
* determines a configuration property name,
* <tt>fs.AbstractFileSystem.<i>scheme</i>.impl</tt> whose value names the
* AbstractFileSystem class.
*
   * The entire URI and conf are passed to the AbstractFileSystem factory method.
*
* @param uri for the file system to be created.
* @param conf which is passed to the file system impl.
*
* @return file system for the given URI.
*
* @throws UnsupportedFileSystemException if the file system for
* <code>uri</code> is not supported.
*/
public static AbstractFileSystem get(final URI uri, final Configuration conf)
throws UnsupportedFileSystemException {
return createFileSystem(uri, conf);
}
/**
* Constructor to be called by subclasses.
*
* @param uri for this file system.
* @param supportedScheme the scheme supported by the implementor
   * @param authorityNeeded if true then the URI must have authority, if false
* then the URI must have null authority.
*
* @throws URISyntaxException <code>uri</code> has syntax error
*/
public AbstractFileSystem(final URI uri, final String supportedScheme,
final boolean authorityNeeded, final int defaultPort)
throws URISyntaxException {
myUri = getUri(uri, supportedScheme, authorityNeeded, defaultPort);
statistics = getStatistics(uri);
}
/**
   * Check that the URI's scheme matches the supported scheme.
* @param uri
* @param supportedScheme
*/
public void checkScheme(URI uri, String supportedScheme) {
String scheme = uri.getScheme();
if (scheme == null) {
throw new HadoopIllegalArgumentException("Uri without scheme: " + uri);
}
if (!scheme.equals(supportedScheme)) {
throw new HadoopIllegalArgumentException("Uri scheme " + uri
+ " does not match the scheme " + supportedScheme);
}
}
/**
* Get the URI for the file system based on the given URI. The path, query
* part of the given URI is stripped out and default file system port is used
* to form the URI.
*
* @param uri FileSystem URI.
* @param authorityNeeded if true authority cannot be null in the URI. If
* false authority must be null.
* @param defaultPort default port to use if port is not specified in the URI.
*
* @return URI of the file system
*
* @throws URISyntaxException <code>uri</code> has syntax error
*/
private URI getUri(URI uri, String supportedScheme,
boolean authorityNeeded, int defaultPort) throws URISyntaxException {
checkScheme(uri, supportedScheme);
// A file system implementation that requires authority must always
// specify default port
if (defaultPort < 0 && authorityNeeded) {
throw new HadoopIllegalArgumentException(
"FileSystem implementation error - default port " + defaultPort
+ " is not valid");
}
String authority = uri.getAuthority();
if (authority == null) {
if (authorityNeeded) {
throw new HadoopIllegalArgumentException("Uri without authority: " + uri);
} else {
return new URI(supportedScheme + ":///");
}
}
// authority is non null - AuthorityNeeded may be true or false.
int port = uri.getPort();
port = (port == -1 ? defaultPort : port);
if (port == -1) { // no port supplied and default port is not specified
return new URI(supportedScheme, authority, "/", null);
}
return new URI(supportedScheme + "://" + uri.getHost() + ":" + port);
}
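  // Illustrative sketch, not part of the original source (assumed values):
  // for uri == hdfs://host/path, supportedScheme == "hdfs" and
  // defaultPort == 8020, the stored URI becomes hdfs://host:8020 -- the
  // path is stripped and the default port is made explicit.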
/**
* The default port of this file system.
*
* @return default port of this file system's Uri scheme
   * A URI with a port of -1 implies the default port.
*/
public abstract int getUriDefaultPort();
/**
* Returns a URI whose scheme and authority identify this FileSystem.
*
* @return the uri of this file system.
*/
public URI getUri() {
return myUri;
}
/**
* Check that a Path belongs to this FileSystem.
*
   * If the path is a fully qualified URI, then its scheme and authority
   * must match those of this file system. Otherwise the path must be a
   * slash-relative name.
*
* @throws InvalidPathException if the path is invalid
*/
public void checkPath(Path path) {
URI uri = path.toUri();
String thatScheme = uri.getScheme();
String thatAuthority = uri.getAuthority();
if (thatScheme == null) {
if (thatAuthority == null) {
if (path.isUriPathAbsolute()) {
return;
}
throw new InvalidPathException("relative paths not allowed:" +
path);
} else {
throw new InvalidPathException(
"Path without scheme with non-null authority:" + path);
}
}
String thisScheme = this.getUri().getScheme();
String thisHost = this.getUri().getHost();
String thatHost = uri.getHost();
// Schemes and hosts must match.
// Allow for null Authority for file:///
if (!thisScheme.equalsIgnoreCase(thatScheme) ||
(thisHost != null &&
!thisHost.equalsIgnoreCase(thatHost)) ||
(thisHost == null && thatHost != null)) {
throw new InvalidPathException("Wrong FS: " + path + ", expected: "
+ this.getUri());
}
// Ports must match, unless this FS instance is using the default port, in
// which case the port may be omitted from the given URI
int thisPort = this.getUri().getPort();
int thatPort = uri.getPort();
if (thatPort == -1) { // -1 => defaultPort of Uri scheme
thatPort = this.getUriDefaultPort();
}
if (thisPort != thatPort) {
throw new InvalidPathException("Wrong FS: " + path + ", expected: "
+ this.getUri());
}
}
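  // Illustrative sketch, not part of the original source, assuming a file
  // system whose URI is hdfs://nn:8020 with default port 8020:
  //   checkPath(new Path("/a/b"));             // OK: slash-relative name
  //   checkPath(new Path("hdfs://nn:8020/a")); // OK: scheme, host, port match
  //   checkPath(new Path("hdfs://nn/a"));      // OK: -1 maps to default port
  //   checkPath(new Path("file:///a"));        // throws InvalidPathException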
/**
* Get the path-part of a pathname. Checks that URI matches this file system
* and that the path-part is a valid name.
*
* @param p path
*
* @return path-part of the Path p
*/
public String getUriPath(final Path p) {
checkPath(p);
String s = p.toUri().getPath();
if (!isValidName(s)) {
throw new InvalidPathException("Path part " + s + " from URI " + p
+ " is not a valid filename.");
}
return s;
}
/**
* Make the path fully qualified to this file system
* @param path
* @return the qualified path
*/
public Path makeQualified(Path path) {
checkPath(path);
return path.makeQualified(this.getUri(), null);
}
/**
* Some file systems like LocalFileSystem have an initial workingDir
* that is used as the starting workingDir. For other file systems
* like HDFS there is no built in notion of an initial workingDir.
*
   * @return the initial workingDir if the file system has such a notion,
   * otherwise null.
*/
public Path getInitialWorkingDirectory() {
return null;
}
/**
* Return the current user's home directory in this file system.
* The default implementation returns "/user/$USER/".
*
* @return current user's home directory.
*/
public Path getHomeDirectory() {
return new Path("/user/"+System.getProperty("user.name")).makeQualified(
getUri(), null);
}
/**
* Return a set of server default configuration values.
*
* @return server default configuration values
*
* @throws IOException an I/O error occurred
*/
public abstract FsServerDefaults getServerDefaults() throws IOException;
/**
* Return the fully-qualified path of path f resolving the path
* through any internal symlinks or mount point
* @param p path to be resolved
* @return fully qualified path
   * @throws FileNotFoundException if the path does not exist
   * @throws AccessControlException if access is denied
   * @throws UnresolvedLinkException if a symbolic link on the path cannot be
   * resolved internally
   * @throws IOException on other I/O errors
*/
public Path resolvePath(final Path p) throws FileNotFoundException,
UnresolvedLinkException, AccessControlException, IOException {
checkPath(p);
return getFileStatus(p).getPath(); // default impl is to return the path
}
/**
* The specification of this method matches that of
* {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except
* that the Path f must be fully qualified and the permission is absolute
* (i.e. umask has been applied).
*/
public final FSDataOutputStream create(final Path f,
final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
throws AccessControlException, FileAlreadyExistsException,
FileNotFoundException, ParentNotDirectoryException,
UnsupportedFileSystemException, UnresolvedLinkException, IOException {
checkPath(f);
int bufferSize = -1;
short replication = -1;
long blockSize = -1;
int bytesPerChecksum = -1;
ChecksumOpt checksumOpt = null;
FsPermission permission = null;
Progressable progress = null;
Boolean createParent = null;
for (CreateOpts iOpt : opts) {
if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
if (blockSize != -1) {
throw new HadoopIllegalArgumentException(
"BlockSize option is set multiple times");
}
blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
} else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
if (bufferSize != -1) {
throw new HadoopIllegalArgumentException(
"BufferSize option is set multiple times");
}
bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
} else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
if (replication != -1) {
throw new HadoopIllegalArgumentException(
"ReplicationFactor option is set multiple times");
}
replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
} else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
if (bytesPerChecksum != -1) {
throw new HadoopIllegalArgumentException(
"BytesPerChecksum option is set multiple times");
}
bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
} else if (CreateOpts.ChecksumParam.class.isInstance(iOpt)) {
if (checksumOpt != null) {
throw new HadoopIllegalArgumentException(
"CreateChecksumType option is set multiple times");
}
checksumOpt = ((CreateOpts.ChecksumParam) iOpt).getValue();
} else if (CreateOpts.Perms.class.isInstance(iOpt)) {
if (permission != null) {
throw new HadoopIllegalArgumentException(
"Perms option is set multiple times");
}
permission = ((CreateOpts.Perms) iOpt).getValue();
} else if (CreateOpts.Progress.class.isInstance(iOpt)) {
if (progress != null) {
throw new HadoopIllegalArgumentException(
"Progress option is set multiple times");
}
progress = ((CreateOpts.Progress) iOpt).getValue();
} else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
if (createParent != null) {
throw new HadoopIllegalArgumentException(
"CreateParent option is set multiple times");
}
createParent = ((CreateOpts.CreateParent) iOpt).getValue();
} else {
throw new HadoopIllegalArgumentException("Unkown CreateOpts of type " +
iOpt.getClass().getName());
}
}
if (permission == null) {
throw new HadoopIllegalArgumentException("no permission supplied");
}
FsServerDefaults ssDef = getServerDefaults();
if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
throw new IOException("Internal error: default blockSize is" +
" not a multiple of default bytesPerChecksum ");
}
if (blockSize == -1) {
blockSize = ssDef.getBlockSize();
}
// Create a checksum option honoring user input as much as possible.
// If bytesPerChecksum is specified, it will override the one set in
// checksumOpt. Any missing value will be filled in using the default.
ChecksumOpt defaultOpt = new ChecksumOpt(
ssDef.getChecksumType(),
ssDef.getBytesPerChecksum());
checksumOpt = ChecksumOpt.processChecksumOpt(defaultOpt,
checksumOpt, bytesPerChecksum);
if (bufferSize == -1) {
bufferSize = ssDef.getFileBufferSize();
}
if (replication == -1) {
replication = ssDef.getReplication();
}
if (createParent == null) {
createParent = false;
}
if (blockSize % bytesPerChecksum != 0) {
throw new HadoopIllegalArgumentException(
"blockSize should be a multiple of checksumsize");
}
return this.createInternal(f, createFlag, permission, bufferSize,
replication, blockSize, progress, checksumOpt, createParent);
}
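  // Illustrative sketch (editor's addition, not part of the original class):
  // one plausible way a caller could drive create() above. The CreateOpts
  // factory methods (perms, blockSize, createParent) are assumed to exist on
  // Options.CreateOpts; every checked exception the method declares extends
  // IOException, so that one type suffices here.
  private static FSDataOutputStream createWithOptsExample(
      AbstractFileSystem afs, Path f) throws IOException {
    return afs.create(f,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.perms(FsPermission.getFileDefault()),
        CreateOpts.blockSize(128L * 1024 * 1024),
        CreateOpts.createParent());
  }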
/**
* The specification of this method matches that of
* {@link #create(Path, EnumSet, Options.CreateOpts...)} except that the opts
* have been declared explicitly.
*/
public abstract FSDataOutputStream createInternal(Path f,
EnumSet<CreateFlag> flag, FsPermission absolutePermission,
int bufferSize, short replication, long blockSize, Progressable progress,
ChecksumOpt checksumOpt, boolean createParent)
throws AccessControlException, FileAlreadyExistsException,
FileNotFoundException, ParentNotDirectoryException,
UnsupportedFileSystemException, UnresolvedLinkException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#mkdir(Path, FsPermission, boolean)} except that the Path
* f must be fully qualified and the permission is absolute (i.e.
* umask has been applied).
*/
public abstract void mkdir(final Path dir, final FsPermission permission,
final boolean createParent) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
UnresolvedLinkException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#delete(Path, boolean)} except that Path f must be for
* this file system.
*/
public abstract boolean delete(final Path f, final boolean recursive)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#open(Path)} except that Path f must be for this
* file system.
*/
public FSDataInputStream open(final Path f) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
return open(f, getServerDefaults().getFileBufferSize());
}
/**
* The specification of this method matches that of
* {@link FileContext#open(Path, int)} except that Path f must be for this
* file system.
*/
public abstract FSDataInputStream open(final Path f, int bufferSize)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#truncate(Path, long)} except that Path f must be for
* this file system.
*/
public boolean truncate(Path f, long newLength)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support truncate");
}
/**
* The specification of this method matches that of
* {@link FileContext#setReplication(Path, short)} except that Path f must be
* for this file system.
*/
public abstract boolean setReplication(final Path f,
final short replication) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
* f must be for this file system.
*/
public final void rename(final Path src, final Path dst,
final Options.Rename... options) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnresolvedLinkException, IOException {
boolean overwrite = false;
if (null != options) {
for (Rename option : options) {
if (option == Rename.OVERWRITE) {
overwrite = true;
}
}
}
renameInternal(src, dst, overwrite);
}
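  // Illustrative sketch (editor's addition): replacing an existing
  // destination goes through this same rename() by passing
  // Options.Rename.OVERWRITE, which the loop above translates into the
  // boolean handed to renameInternal().
  private static void renameOverwriteExample(AbstractFileSystem afs,
      Path src, Path dst) throws IOException {
    afs.rename(src, dst, Options.Rename.OVERWRITE);
  }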
/**
* The specification of this method matches that of
* {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
* f must be for this file system and NO OVERWRITE is performed.
*
   * File systems that do not have a built-in overwrite need to implement only
   * this method and can take advantage of the default implementation of the
   * other {@link #renameInternal(Path, Path, boolean)}.
*/
public abstract void renameInternal(final Path src, final Path dst)
throws AccessControlException, FileAlreadyExistsException,
FileNotFoundException, ParentNotDirectoryException,
UnresolvedLinkException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
* f must be for this file system.
*/
public void renameInternal(final Path src, final Path dst,
boolean overwrite) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnresolvedLinkException, IOException {
// Default implementation deals with overwrite in a non-atomic way
final FileStatus srcStatus = getFileLinkStatus(src);
FileStatus dstStatus;
try {
dstStatus = getFileLinkStatus(dst);
} catch (IOException e) {
dstStatus = null;
}
if (dstStatus != null) {
if (dst.equals(src)) {
throw new FileAlreadyExistsException(
"The source "+src+" and destination "+dst+" are the same");
}
if (srcStatus.isSymlink() && dst.equals(srcStatus.getSymlink())) {
throw new FileAlreadyExistsException(
"Cannot rename symlink "+src+" to its target "+dst);
}
// It's OK to rename a file to a symlink and vice versa
if (srcStatus.isDirectory() != dstStatus.isDirectory()) {
throw new IOException("Source " + src + " and destination " + dst
+ " must both be directories");
}
if (!overwrite) {
throw new FileAlreadyExistsException("Rename destination " + dst
+ " already exists.");
}
// Delete the destination that is a file or an empty directory
if (dstStatus.isDirectory()) {
RemoteIterator<FileStatus> list = listStatusIterator(dst);
if (list != null && list.hasNext()) {
throw new IOException(
"Rename cannot overwrite non empty destination directory " + dst);
}
}
delete(dst, false);
} else {
final Path parent = dst.getParent();
final FileStatus parentStatus = getFileStatus(parent);
if (parentStatus.isFile()) {
throw new ParentNotDirectoryException("Rename destination parent "
+ parent + " is a file.");
}
}
renameInternal(src, dst);
}
/**
* Returns true if the file system supports symlinks, false otherwise.
* @return true if filesystem supports symlinks
*/
public boolean supportsSymlinks() {
return false;
}
/**
* The specification of this method matches that of
   * {@link FileContext#createSymlink(Path, Path, boolean)}.
*/
public void createSymlink(final Path target, final Path link,
final boolean createParent) throws IOException, UnresolvedLinkException {
throw new IOException("File system does not support symlinks");
}
/**
* Partially resolves the path. This is used during symlink resolution in
* {@link FSLinkResolver}, and differs from the similarly named method
* {@link FileContext#getLinkTarget(Path)}.
* @throws IOException subclass implementations may throw IOException
*/
public Path getLinkTarget(final Path f) throws IOException {
throw new AssertionError("Implementation Error: " + getClass()
+ " that threw an UnresolvedLinkException, causing this method to be"
+ " called, needs to override this method.");
}
/**
* The specification of this method matches that of
* {@link FileContext#setPermission(Path, FsPermission)} except that Path f
* must be for this file system.
*/
public abstract void setPermission(final Path f,
final FsPermission permission) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#setOwner(Path, String, String)} except that Path f must
* be for this file system.
*/
public abstract void setOwner(final Path f, final String username,
final String groupname) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#setTimes(Path, long, long)} except that Path f must be
* for this file system.
*/
public abstract void setTimes(final Path f, final long mtime,
final long atime) throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#getFileChecksum(Path)} except that Path f must be for
* this file system.
*/
public abstract FileChecksum getFileChecksum(final Path f)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#getFileStatus(Path)}
* except that an UnresolvedLinkException may be thrown if a symlink is
* encountered in the path.
*/
public abstract FileStatus getFileStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#access(Path, FsAction)}
* except that an UnresolvedLinkException may be thrown if a symlink is
* encountered in the path.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
FileSystem.checkAccessPermissions(this.getFileStatus(path), mode);
}
/**
* The specification of this method matches that of
* {@link FileContext#getFileLinkStatus(Path)}
* except that an UnresolvedLinkException may be thrown if a symlink is
* encountered in the path leading up to the final path component.
* If the file system does not support symlinks then the behavior is
* equivalent to {@link AbstractFileSystem#getFileStatus(Path)}.
*/
public FileStatus getFileLinkStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
return getFileStatus(f);
}
/**
* The specification of this method matches that of
* {@link FileContext#getFileBlockLocations(Path, long, long)} except that
* Path f must be for this file system.
*/
public abstract BlockLocation[] getFileBlockLocations(final Path f,
final long start, final long len) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#getFsStatus(Path)} except that Path f must be for this
* file system.
*/
public FsStatus getFsStatus(final Path f) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
// default impl gets FsStatus of root
return getFsStatus();
}
/**
* The specification of this method matches that of
* {@link FileContext#getFsStatus(Path)}.
*/
public abstract FsStatus getFsStatus() throws AccessControlException,
FileNotFoundException, IOException;
/**
* The specification of this method matches that of
* {@link FileContext#listStatus(Path)} except that Path f must be for this
* file system.
*/
public RemoteIterator<FileStatus> listStatusIterator(final Path f)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
return new RemoteIterator<FileStatus>() {
private int i = 0;
private FileStatus[] statusList = listStatus(f);
@Override
public boolean hasNext() {
return i < statusList.length;
}
@Override
public FileStatus next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
return statusList[i++];
}
};
}
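  // Illustrative sketch (editor's addition): draining the iterator returned
  // above; both hasNext() and next() may throw IOException on a remote file
  // system, so the loop is declared accordingly.
  private static int countEntriesExample(AbstractFileSystem afs, Path dir)
      throws IOException {
    int count = 0;
    RemoteIterator<FileStatus> it = afs.listStatusIterator(dir);
    while (it.hasNext()) {
      it.next();
      count++;
    }
    return count;
  }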
/**
* The specification of this method matches that of
* {@link FileContext#listLocatedStatus(Path)} except that Path f
* must be for this file system.
*/
public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
return new RemoteIterator<LocatedFileStatus>() {
private RemoteIterator<FileStatus> itor = listStatusIterator(f);
@Override
public boolean hasNext() throws IOException {
return itor.hasNext();
}
@Override
public LocatedFileStatus next() throws IOException {
if (!hasNext()) {
throw new NoSuchElementException("No more entry in " + f);
}
FileStatus result = itor.next();
BlockLocation[] locs = null;
if (result.isFile()) {
locs = getFileBlockLocations(
result.getPath(), 0, result.getLen());
}
return new LocatedFileStatus(result, locs);
}
};
}
/**
* The specification of this method matches that of
* {@link FileContext.Util#listStatus(Path)} except that Path f must be
* for this file system.
*/
public abstract FileStatus[] listStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException;
/**
* @return an iterator over the corrupt files under the given path
* (may contain duplicates if a file has more than one corrupt block)
* @throws IOException
*/
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
throws IOException {
throw new UnsupportedOperationException(getClass().getCanonicalName() +
" does not support" +
" listCorruptFileBlocks");
}
/**
* The specification of this method matches that of
* {@link FileContext#setVerifyChecksum(boolean, Path)} except that Path f
* must be for this file system.
*/
public abstract void setVerifyChecksum(final boolean verifyChecksum)
throws AccessControlException, IOException;
/**
* Get a canonical name for this file system.
* @return a URI string that uniquely identifies this file system
*/
public String getCanonicalServiceName() {
return SecurityUtil.buildDTServiceName(getUri(), getUriDefaultPort());
}
/**
* Get one or more delegation tokens associated with the filesystem. Normally
* a file system returns a single delegation token. A file system that manages
   * multiple file systems underneath could return a set of delegation tokens
   * for all the file systems it manages.
*
* @param renewer the account name that is allowed to renew the token.
* @return List of delegation tokens.
   *    If delegation tokens are not supported, a list of size zero is returned.
* @throws IOException
*/
@InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
return new ArrayList<Token<?>>(0);
}
/**
* Modifies ACL entries of files and directories. This method can add new ACL
* entries or modify the permissions on existing ACL entries. All existing
* ACL entries that are not specified in this call are retained without
* changes. (Modifications are merged into the current ACL.)
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications
* @throws IOException if an ACL could not be modified
*/
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support modifyAclEntries");
}
/**
* Removes ACL entries from files and directories. Other ACL entries are
* retained.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing entries to remove
* @throws IOException if an ACL could not be modified
*/
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeAclEntries");
}
/**
* Removes all default ACL entries from files and directories.
*
* @param path Path to modify
* @throws IOException if an ACL could not be modified
*/
public void removeDefaultAcl(Path path)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeDefaultAcl");
}
/**
* Removes all but the base ACL entries of files and directories. The entries
* for user, group, and others are retained for compatibility with permission
* bits.
*
* @param path Path to modify
* @throws IOException if an ACL could not be removed
*/
public void removeAcl(Path path)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeAcl");
}
/**
* Fully replaces ACL of files and directories, discarding all existing
* entries.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications, must include entries
* for user, group, and others for compatibility with permission bits.
* @throws IOException if an ACL could not be modified
*/
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support setAcl");
}
/**
* Gets the ACLs of files and directories.
*
* @param path Path to get
   * @return AclStatus describing the ACL of the file or directory
* @throws IOException if an ACL could not be read
*/
public AclStatus getAclStatus(Path path) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getAclStatus");
}
/**
* Set an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
* @param name xattr name.
* @param value xattr value.
* @throws IOException
*/
public void setXAttr(Path path, String name, byte[] value)
throws IOException {
setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
}
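  // Illustrative sketch (editor's addition): a round trip through the xattr
  // API using the "user." namespace prefix required by the javadoc above.
  // The attribute name "user.origin" and the UTF-8 encoding are assumptions
  // made for the demo only.
  private static byte[] xattrRoundTripExample(AbstractFileSystem afs, Path p)
      throws IOException {
    afs.setXAttr(p, "user.origin", "ingest-pipeline".getBytes("UTF-8"));
    return afs.getXAttr(p, "user.origin");
  }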
/**
* Set an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
* @param name xattr name.
* @param value xattr value.
* @param flag xattr set flag
* @throws IOException
*/
public void setXAttr(Path path, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support setXAttr");
}
/**
* Get an xattr for a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attribute
* @param name xattr name.
* @return byte[] xattr value.
* @throws IOException
*/
public byte[] getXAttr(Path path, String name) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getXAttr");
}
/**
* Get all of the xattrs for a file or directory.
* Only those xattrs for which the logged-in user has permissions to view
* are returned.
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return Map<String, byte[]> describing the XAttrs of the file or directory
* @throws IOException
*/
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getXAttrs");
}
/**
* Get all of the xattrs for a file or directory.
* Only those xattrs for which the logged-in user has permissions to view
* are returned.
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @param names XAttr names.
* @return Map<String, byte[]> describing the XAttrs of the file or directory
* @throws IOException
*/
public Map<String, byte[]> getXAttrs(Path path, List<String> names)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getXAttrs");
}
/**
* Get all of the xattr names for a file or directory.
* Only the xattr names for which the logged-in user has permissions to view
* are returned.
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
   * @return List<String> of the XAttr names of the file or directory
* @throws IOException
*/
public List<String> listXAttrs(Path path)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support listXAttrs");
}
/**
* Remove an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to remove extended attribute
* @param name xattr name
* @throws IOException
*/
public void removeXAttr(Path path, String name) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeXAttr");
}
/**
* The specification of this method matches that of
* {@link FileContext#createSnapshot(Path, String)}.
*/
public Path createSnapshot(final Path path, final String snapshotName)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support createSnapshot");
}
/**
* The specification of this method matches that of
* {@link FileContext#renameSnapshot(Path, String, String)}.
*/
public void renameSnapshot(final Path path, final String snapshotOldName,
final String snapshotNewName) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support renameSnapshot");
}
/**
* The specification of this method matches that of
* {@link FileContext#deleteSnapshot(Path, String)}.
*/
public void deleteSnapshot(final Path snapshotDir, final String snapshotName)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support deleteSnapshot");
}
/**
* Set the storage policy for a given file or directory.
*
* @param path file or directory path.
* @param policyName the name of the target storage policy. The list
* of supported Storage policies can be retrieved
* via {@link #getAllStoragePolicies}.
*/
public void setStoragePolicy(final Path path, final String policyName)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support setStoragePolicy");
}
/**
* Retrieve the storage policy for a given file or directory.
*
* @param src file or directory path.
   * @return storage policy for the given file.
* @throws IOException
*/
public BlockStoragePolicySpi getStoragePolicy(final Path src)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getStoragePolicy");
}
/**
* Retrieve all the storage policies supported by this file system.
*
* @return all storage policies supported by this filesystem.
* @throws IOException
*/
public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getAllStoragePolicies");
}
@Override //Object
public int hashCode() {
return myUri.hashCode();
}
@Override //Object
public boolean equals(Object other) {
    if (!(other instanceof AbstractFileSystem)) {
return false;
}
return myUri.equals(((AbstractFileSystem) other).myUri);
}
}
| 46,447 | 35.372749 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.permission.FsPermission;
/**
* This class defines a FileStatus that includes a file's block locations.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class LocatedFileStatus extends FileStatus {
private BlockLocation[] locations;
public LocatedFileStatus() {
super();
}
/**
* Constructor
* @param stat a file status
* @param locations a file's block locations
*/
public LocatedFileStatus(FileStatus stat, BlockLocation[] locations)
throws IOException {
this(stat.getLen(), stat.isDirectory(), stat.getReplication(),
stat.getBlockSize(), stat.getModificationTime(),
stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
stat.getGroup(), null, stat.getPath(), locations);
if (stat.isSymlink()) {
setSymlink(stat.getSymlink());
}
}
/**
* Constructor
*
* @param length a file's length
* @param isdir if the path is a directory
* @param block_replication the file's replication factor
* @param blocksize a file's block size
* @param modification_time a file's modification time
* @param access_time a file's access time
* @param permission a file's permission
* @param owner a file's owner
* @param group a file's group
* @param symlink symlink if the path is a symbolic link
* @param path the path's qualified name
* @param locations a file's block locations
*/
public LocatedFileStatus(long length, boolean isdir,
int block_replication,
long blocksize, long modification_time, long access_time,
FsPermission permission, String owner, String group,
Path symlink,
Path path,
BlockLocation[] locations) {
super(length, isdir, block_replication, blocksize, modification_time,
access_time, permission, owner, group, symlink, path);
this.locations = locations;
}
/**
* Get the file's block locations
* @return the file's block locations
*/
public BlockLocation[] getBlockLocations() {
return locations;
}
/**
* Compare this FileStatus to another FileStatus
* @param o the FileStatus to be compared.
* @return a negative integer, zero, or a positive integer as this object
* is less than, equal to, or greater than the specified object.
*/
@Override
public int compareTo(FileStatus o) {
return super.compareTo(o);
}
  /** Compare whether this object is equal to another object.
   * @param o the object to be compared.
   * @return true if the two file statuses have the same path name; false otherwise.
*/
@Override
public boolean equals(Object o) {
return super.equals(o);
}
/**
* Returns a hash code value for the object, which is defined as
* the hash code of the path name.
*
* @return a hash code value for the path name.
*/
@Override
public int hashCode() {
return super.hashCode();
}
}
| 3,918 | 30.861789 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class Globber {
public static final Log LOG = LogFactory.getLog(Globber.class.getName());
private final FileSystem fs;
private final FileContext fc;
private final Path pathPattern;
private final PathFilter filter;
public Globber(FileSystem fs, Path pathPattern, PathFilter filter) {
this.fs = fs;
this.fc = null;
this.pathPattern = pathPattern;
this.filter = filter;
}
public Globber(FileContext fc, Path pathPattern, PathFilter filter) {
this.fs = null;
this.fc = fc;
this.pathPattern = pathPattern;
this.filter = filter;
}
private FileStatus getFileStatus(Path path) throws IOException {
try {
if (fs != null) {
return fs.getFileStatus(path);
} else {
return fc.getFileStatus(path);
}
} catch (FileNotFoundException e) {
return null;
}
}
private FileStatus[] listStatus(Path path) throws IOException {
try {
if (fs != null) {
return fs.listStatus(path);
} else {
return fc.util().listStatus(path);
}
} catch (FileNotFoundException e) {
return new FileStatus[0];
}
}
private Path fixRelativePart(Path path) {
if (fs != null) {
return fs.fixRelativePart(path);
} else {
return fc.fixRelativePart(path);
}
}
/**
   * Convert a path component that contains backslash escape sequences to a
* literal string. This is necessary when you want to explicitly refer to a
* path that contains globber metacharacters.
*/
private static String unescapePathComponent(String name) {
return name.replaceAll("\\\\(.)", "$1");
}
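  // Illustrative sketch (editor's addition): a component escaped for the
  // globber, such as "file\\*name" (a literal asterisk), unescapes to
  // "file*name".
  private static String unescapeExample() {
    return unescapePathComponent("file\\*name");  // -> "file*name"
  }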
/**
* Translate an absolute path into a list of path components.
* We merge double slashes into a single slash here.
* POSIX root path, i.e. '/', does not get an entry in the list.
*/
private static List<String> getPathComponents(String path)
throws IOException {
ArrayList<String> ret = new ArrayList<String>();
for (String component : path.split(Path.SEPARATOR)) {
if (!component.isEmpty()) {
ret.add(component);
}
}
return ret;
}
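  // Illustrative sketch (editor's addition): getPathComponents("/a//b/c")
  // yields [a, b, c] -- the doubled slash collapses and the root itself
  // contributes no entry, as documented above.
  private static List<String> pathComponentsExample() throws IOException {
    return getPathComponents("/a//b/c");
  }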
private String schemeFromPath(Path path) throws IOException {
String scheme = path.toUri().getScheme();
if (scheme == null) {
if (fs != null) {
scheme = fs.getUri().getScheme();
} else {
scheme = fc.getFSofPath(fc.fixRelativePart(path)).
getUri().getScheme();
}
}
return scheme;
}
private String authorityFromPath(Path path) throws IOException {
String authority = path.toUri().getAuthority();
if (authority == null) {
if (fs != null) {
authority = fs.getUri().getAuthority();
} else {
authority = fc.getFSofPath(fc.fixRelativePart(path)).
getUri().getAuthority();
}
}
    return authority;
}
public FileStatus[] glob() throws IOException {
TraceScope scope = Trace.startSpan("Globber#glob");
Span span = scope.getSpan();
if (span != null) {
span.addKVAnnotation("pattern", pathPattern.toUri().getPath());
}
try {
return doGlob();
} finally {
scope.close();
}
}
private FileStatus[] doGlob() throws IOException {
// First we get the scheme and authority of the pattern that was passed
// in.
String scheme = schemeFromPath(pathPattern);
String authority = authorityFromPath(pathPattern);
// Next we strip off everything except the pathname itself, and expand all
// globs. Expansion is a process which turns "grouping" clauses,
// expressed as brackets, into separate path patterns.
String pathPatternString = pathPattern.toUri().getPath();
List<String> flattenedPatterns = GlobExpander.expand(pathPatternString);
// Now loop over all flattened patterns. In every case, we'll be trying to
// match them to entries in the filesystem.
ArrayList<FileStatus> results =
new ArrayList<FileStatus>(flattenedPatterns.size());
boolean sawWildcard = false;
for (String flatPattern : flattenedPatterns) {
// Get the absolute path for this flattened pattern. We couldn't do
// this prior to flattening because of patterns like {/,a}, where which
// path you go down influences how the path must be made absolute.
Path absPattern = fixRelativePart(new Path(
flatPattern.isEmpty() ? Path.CUR_DIR : flatPattern));
// Now we break the flattened, absolute pattern into path components.
// For example, /a/*/c would be broken into the list [a, *, c]
List<String> components =
getPathComponents(absPattern.toUri().getPath());
// Starting out at the root of the filesystem, we try to match
// filesystem entries against pattern components.
ArrayList<FileStatus> candidates = new ArrayList<FileStatus>(1);
// To get the "real" FileStatus of root, we'd have to do an expensive
// RPC to the NameNode. So we create a placeholder FileStatus which has
// the correct path, but defaults for the rest of the information.
// Later, if it turns out we actually want the FileStatus of root, we'll
// replace the placeholder with a real FileStatus obtained from the
// NameNode.
FileStatus rootPlaceholder;
if (Path.WINDOWS && !components.isEmpty()
&& Path.isWindowsAbsolutePath(absPattern.toUri().getPath(), true)) {
// On Windows the path could begin with a drive letter, e.g. /E:/foo.
// We will skip matching the drive letter and start from listing the
// root of the filesystem on that drive.
String driveLetter = components.remove(0);
rootPlaceholder = new FileStatus(0, true, 0, 0, 0, new Path(scheme,
authority, Path.SEPARATOR + driveLetter + Path.SEPARATOR));
} else {
rootPlaceholder = new FileStatus(0, true, 0, 0, 0,
new Path(scheme, authority, Path.SEPARATOR));
}
candidates.add(rootPlaceholder);
for (int componentIdx = 0; componentIdx < components.size();
componentIdx++) {
ArrayList<FileStatus> newCandidates =
new ArrayList<FileStatus>(candidates.size());
GlobFilter globFilter = new GlobFilter(components.get(componentIdx));
String component = unescapePathComponent(components.get(componentIdx));
if (globFilter.hasPattern()) {
sawWildcard = true;
}
if (candidates.isEmpty() && sawWildcard) {
// Optimization: if there are no more candidates left, stop examining
// the path components. We can only do this if we've already seen
// a wildcard component-- otherwise, we still need to visit all path
// components in case one of them is a wildcard.
break;
}
if ((componentIdx < components.size() - 1) &&
(!globFilter.hasPattern())) {
// Optimization: if this is not the terminal path component, and we
// are not matching against a glob, assume that it exists. If it
// doesn't exist, we'll find out later when resolving a later glob
// or the terminal path component.
for (FileStatus candidate : candidates) {
candidate.setPath(new Path(candidate.getPath(), component));
}
continue;
}
for (FileStatus candidate : candidates) {
if (globFilter.hasPattern()) {
FileStatus[] children = listStatus(candidate.getPath());
if (children.length == 1) {
// If we get back only one result, this could be either a listing
// of a directory with one entry, or it could reflect the fact
// that what we listed resolved to a file.
//
// Unfortunately, we can't just compare the returned paths to
// figure this out. Consider the case where you have /a/b, where
// b is a symlink to "..". In that case, listing /a/b will give
// back "/a/b" again. If we just went by returned pathname, we'd
// incorrectly conclude that /a/b was a file and should not match
// /a/*/*. So we use getFileStatus of the path we just listed to
// disambiguate.
if (!getFileStatus(candidate.getPath()).isDirectory()) {
continue;
}
}
for (FileStatus child : children) {
if (componentIdx < components.size() - 1) {
// Don't try to recurse into non-directories. See HADOOP-10957.
if (!child.isDirectory()) continue;
}
// Set the child path based on the parent path.
child.setPath(new Path(candidate.getPath(),
child.getPath().getName()));
if (globFilter.accept(child.getPath())) {
newCandidates.add(child);
}
}
} else {
// When dealing with non-glob components, use getFileStatus
// instead of listStatus. This is an optimization, but it also
// is necessary for correctness in HDFS, since there are some
// special HDFS directories like .reserved and .snapshot that are
// not visible to listStatus, but which do exist. (See HADOOP-9877)
FileStatus childStatus = getFileStatus(
new Path(candidate.getPath(), component));
if (childStatus != null) {
newCandidates.add(childStatus);
}
}
}
candidates = newCandidates;
}
for (FileStatus status : candidates) {
// Use object equality to see if this status is the root placeholder.
// See the explanation for rootPlaceholder above for more information.
if (status == rootPlaceholder) {
status = getFileStatus(rootPlaceholder.getPath());
if (status == null) continue;
}
// HADOOP-3497 semantics: the user-defined filter is applied at the
// end, once the full path is built up.
if (filter.accept(status.getPath())) {
results.add(status);
}
}
}
/*
* When the input pattern "looks" like just a simple filename, and we
* can't find it, we return null rather than an empty array.
* This is a special case which the shell relies on.
*
* To be more precise: if there were no results, AND there were no
* groupings (aka brackets), and no wildcards in the input (aka stars),
* we return null.
*/
if ((!sawWildcard) && results.isEmpty() &&
(flattenedPatterns.size() <= 1)) {
return null;
}
/*
* In general, the results list will already be sorted, since listStatus
* returns results in sorted order for many Hadoop filesystems. However,
* not all Hadoop filesystems have this property. So we sort here in order
* to get consistent results. See HADOOP-10798 for details.
*/
    FileStatus[] ret = results.toArray(new FileStatus[0]);
Arrays.sort(ret);
return ret;
}
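  // Illustrative sketch (editor's addition): how this class is typically
  // exercised. FileSystem#globStatus builds a Globber internally; direct
  // construction with an accept-all PathFilter mirrors that path. The
  // pattern string is an assumption for the demo.
  static FileStatus[] globExample(FileSystem fs) throws IOException {
    PathFilter acceptAll = new PathFilter() {
      @Override
      public boolean accept(Path path) {
        return true;
      }
    };
    return new Globber(fs, new Path("/logs/2015-*/part-*"), acceptAll).glob();
  }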
}
| 12,505 | 38.451104 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.commons.codec.binary.Hex;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
/**
* FileEncryptionInfo encapsulates all the encryption-related information for
* an encrypted file.
*/
@InterfaceAudience.Private
public class FileEncryptionInfo {
private final CipherSuite cipherSuite;
private final CryptoProtocolVersion version;
private final byte[] edek;
private final byte[] iv;
private final String keyName;
private final String ezKeyVersionName;
/**
* Create a FileEncryptionInfo.
*
* @param suite CipherSuite used to encrypt the file
* @param edek encrypted data encryption key (EDEK) of the file
* @param iv initialization vector (IV) used to encrypt the file
* @param keyName name of the key used for the encryption zone
* @param ezKeyVersionName name of the KeyVersion used to encrypt the
* encrypted data encryption key.
*/
public FileEncryptionInfo(final CipherSuite suite,
final CryptoProtocolVersion version, final byte[] edek,
final byte[] iv, final String keyName, final String ezKeyVersionName) {
checkNotNull(suite);
checkNotNull(version);
checkNotNull(edek);
checkNotNull(iv);
checkNotNull(keyName);
checkNotNull(ezKeyVersionName);
checkArgument(iv.length == suite.getAlgorithmBlockSize(),
"Unexpected IV length");
this.cipherSuite = suite;
this.version = version;
this.edek = edek;
this.iv = iv;
this.keyName = keyName;
this.ezKeyVersionName = ezKeyVersionName;
}
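  // Illustrative sketch (editor's addition): building an instance. The
  // 16-byte IV matches the algorithm block size that AES_CTR_NOPADDING is
  // assumed to report, which the checkArgument above enforces; the key names
  // are demo assumptions.
  private static FileEncryptionInfo exampleInfo(byte[] edek) {
    return new FileEncryptionInfo(CipherSuite.AES_CTR_NOPADDING,
        CryptoProtocolVersion.ENCRYPTION_ZONES, edek, new byte[16],
        "zoneKey", "zoneKey@0");
  }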
/**
* @return {@link org.apache.hadoop.crypto.CipherSuite} used to encrypt
* the file.
*/
public CipherSuite getCipherSuite() {
return cipherSuite;
}
/**
* @return {@link org.apache.hadoop.crypto.CryptoProtocolVersion} to use
* to access the file.
*/
public CryptoProtocolVersion getCryptoProtocolVersion() {
return version;
}
/**
* @return encrypted data encryption key (EDEK) for the file
*/
public byte[] getEncryptedDataEncryptionKey() {
return edek;
}
/**
* @return initialization vector (IV) for the cipher used to encrypt the file
*/
public byte[] getIV() {
return iv;
}
/**
* @return name of the encryption zone key.
*/
public String getKeyName() { return keyName; }
/**
* @return name of the encryption zone KeyVersion used to encrypt the
* encrypted data encryption key (EDEK).
*/
public String getEzKeyVersionName() { return ezKeyVersionName; }
@Override
public String toString() {
StringBuilder builder = new StringBuilder("{");
builder.append("cipherSuite: " + cipherSuite);
builder.append(", cryptoProtocolVersion: " + version);
builder.append(", edek: " + Hex.encodeHexString(edek));
builder.append(", iv: " + Hex.encodeHexString(iv));
builder.append(", keyName: " + keyName);
builder.append(", ezKeyVersionName: " + ezKeyVersionName);
builder.append("}");
return builder.toString();
}
}
| 4,068 | 31.552 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RemoteIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.util.NoSuchElementException;
/**
* An iterator over a collection whose elements need to be fetched remotely
*/
public interface RemoteIterator<E> {
/**
* Returns <tt>true</tt> if the iteration has more elements.
*
* @return <tt>true</tt> if the iterator has more elements.
* @throws IOException if any IO error occurs
*/
boolean hasNext() throws IOException;
/**
* Returns the next element in the iteration.
*
* @return the next element in the iteration.
* @throws NoSuchElementException iteration has no more elements.
* @throws IOException if any IO error occurs
*/
E next() throws IOException;
}
| 1,521 | 34.395349 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DUHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.File;
import org.apache.hadoop.util.Shell;
public class DUHelper {
private int folderCount=0;
private int fileCount=0;
private double usage = 0;
private long folderSize = -1;
private DUHelper() {
}
public static long getFolderUsage(String folder) {
return new DUHelper().calculateFolderSize(folder);
}
private long calculateFolderSize(String folder) {
if (folder == null)
throw new IllegalArgumentException("folder");
File f = new File(folder);
return folderSize = getFileSize(f);
}
public String check(String folder) {
if (folder == null)
throw new IllegalArgumentException("folder");
File f = new File(folder);
folderSize = getFileSize(f);
    usage = 1.0 * (f.getTotalSpace() - f.getFreeSpace()) / f.getTotalSpace();
    return String.format("used %d, files %d, disk in use %f",
        folderSize, fileCount, usage);
}
public long getFileCount() {
return fileCount;
}
public double getUsage() {
return usage;
}
private long getFileSize(File folder) {
folderCount++;
//Counting the total folders
long foldersize = 0;
if (folder.isFile())
return folder.length();
File[] filelist = folder.listFiles();
if (filelist == null) {
return 0;
}
for (int i = 0; i < filelist.length; i++) {
if (filelist[i].isDirectory()) {
foldersize += getFileSize(filelist[i]);
} else {
fileCount++; //Counting the total files
foldersize += filelist[i].length();
}
}
return foldersize;
}
public static void main(String[] args) {
if (Shell.WINDOWS)
System.out.println("Windows: "+ DUHelper.getFolderUsage(args[0]));
else
System.out.println("Other: " + DUHelper.getFolderUsage(args[0]));
}
}
| 2,626 | 27.868132 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSError.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** Thrown for unexpected filesystem errors, presumed to reflect disk errors
* in the native filesystem. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FSError extends Error {
private static final long serialVersionUID = 1L;
FSError(Throwable cause) {
super(cause);
}
}
| 1,263 | 35.114286 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.Closeable;
import java.io.IOException;
import org.apache.avro.file.SeekableInput;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** Adapts an {@link FSDataInputStream} to Avro's SeekableInput interface. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class AvroFSInput implements Closeable, SeekableInput {
private final FSDataInputStream stream;
private final long len;
/** Construct given an {@link FSDataInputStream} and its length. */
public AvroFSInput(final FSDataInputStream in, final long len) {
this.stream = in;
this.len = len;
}
/** Construct given a {@link FileContext} and a {@link Path}. */
public AvroFSInput(final FileContext fc, final Path p) throws IOException {
FileStatus status = fc.getFileStatus(p);
this.len = status.getLen();
this.stream = fc.open(p);
}
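  // Illustrative sketch (editor's addition): handing this adapter to Avro's
  // file reader. The fully-qualified Avro types and the
  // DataFileReader(SeekableInput, DatumReader) constructor are assumptions
  // about the Avro version on the classpath.
  private static org.apache.avro.file.DataFileReader<Object> openExample(
      final FileContext fc, final Path p) throws IOException {
    return new org.apache.avro.file.DataFileReader<Object>(
        new AvroFSInput(fc, p),
        new org.apache.avro.generic.GenericDatumReader<Object>());
  }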
@Override
public long length() {
return len;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
return stream.read(b, off, len);
}
@Override
public void seek(long p) throws IOException {
stream.seek(p);
}
@Override
public long tell() throws IOException {
return stream.getPos();
}
@Override
public void close() throws IOException {
stream.close();
}
}
| 2,193 | 29.054795 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InvalidRequestException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
/**
* Thrown when the user makes a malformed request, for example missing required
* parameters or parameters that are not valid.
*/
public class InvalidRequestException extends IOException {
static final long serialVersionUID = 0L;
public InvalidRequestException(String str) {
super(str);
}
}
| 1,175 | 34.636364 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.DataChecksum;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.znerd.xmlenc.XMLOutputter;
import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
/** MD5 of MD5 of CRC32. */
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Unstable
public class MD5MD5CRC32FileChecksum extends FileChecksum {
public static final int LENGTH = MD5Hash.MD5_LEN
+ (Integer.SIZE + Long.SIZE)/Byte.SIZE;
private int bytesPerCRC;
private long crcPerBlock;
private MD5Hash md5;
/** Same as this(0, 0, null) */
public MD5MD5CRC32FileChecksum() {
this(0, 0, null);
}
  /** Create an MD5MD5CRC32FileChecksum. */
public MD5MD5CRC32FileChecksum(int bytesPerCRC, long crcPerBlock, MD5Hash md5) {
this.bytesPerCRC = bytesPerCRC;
this.crcPerBlock = crcPerBlock;
this.md5 = md5;
}
@Override
public String getAlgorithmName() {
return "MD5-of-" + crcPerBlock + "MD5-of-" + bytesPerCRC +
getCrcType().name();
}
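  // Illustrative note (editor's addition): with bytesPerCRC=512 and
  // crcPerBlock=262144 the name above renders as
  // "MD5-of-262144MD5-of-512CRC32" -- the per-block CRC count precedes the
  // CRC width. md5 may be null here because getAlgorithmName() never reads it.
  private static String algorithmNameExample() {
    return new MD5MD5CRC32FileChecksum(512, 262144, null).getAlgorithmName();
  }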
public static DataChecksum.Type getCrcTypeFromAlgorithmName(String algorithm)
throws IOException {
if (algorithm.endsWith(DataChecksum.Type.CRC32.name())) {
return DataChecksum.Type.CRC32;
} else if (algorithm.endsWith(DataChecksum.Type.CRC32C.name())) {
return DataChecksum.Type.CRC32C;
}
throw new IOException("Unknown checksum type in " + algorithm);
}
@Override
public int getLength() {return LENGTH;}
@Override
public byte[] getBytes() {
return WritableUtils.toByteArray(this);
}
/** returns the CRC type */
public DataChecksum.Type getCrcType() {
// default to the one that is understood by all releases.
return DataChecksum.Type.CRC32;
}
@Override
public ChecksumOpt getChecksumOpt() {
return new ChecksumOpt(getCrcType(), bytesPerCRC);
}
@Override
public void readFields(DataInput in) throws IOException {
bytesPerCRC = in.readInt();
crcPerBlock = in.readLong();
md5 = MD5Hash.read(in);
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(bytesPerCRC);
out.writeLong(crcPerBlock);
md5.write(out);
}
/** Write that object to xml output. */
public static void write(XMLOutputter xml, MD5MD5CRC32FileChecksum that
) throws IOException {
xml.startTag(MD5MD5CRC32FileChecksum.class.getName());
if (that != null) {
xml.attribute("bytesPerCRC", "" + that.bytesPerCRC);
xml.attribute("crcPerBlock", "" + that.crcPerBlock);
xml.attribute("crcType", ""+ that.getCrcType().name());
xml.attribute("md5", "" + that.md5);
}
xml.endTag();
}
/** Return the object represented in the attributes. */
public static MD5MD5CRC32FileChecksum valueOf(Attributes attrs
) throws SAXException {
final String bytesPerCRC = attrs.getValue("bytesPerCRC");
final String crcPerBlock = attrs.getValue("crcPerBlock");
final String md5 = attrs.getValue("md5");
String crcType = attrs.getValue("crcType");
DataChecksum.Type finalCrcType;
if (bytesPerCRC == null || crcPerBlock == null || md5 == null) {
return null;
}
try {
// old versions don't support crcType.
if (crcType == null || crcType == "") {
finalCrcType = DataChecksum.Type.CRC32;
} else {
finalCrcType = DataChecksum.Type.valueOf(crcType);
}
switch (finalCrcType) {
case CRC32:
return new MD5MD5CRC32GzipFileChecksum(
Integer.parseInt(bytesPerCRC),
Integer.parseInt(crcPerBlock),
new MD5Hash(md5));
case CRC32C:
return new MD5MD5CRC32CastagnoliFileChecksum(
Integer.parseInt(bytesPerCRC),
Integer.parseInt(crcPerBlock),
new MD5Hash(md5));
default:
// we should never get here since finalCrcType will
// hold a valid type or we should have got an exception.
return null;
}
} catch (Exception e) {
throw new SAXException("Invalid attributes: bytesPerCRC=" + bytesPerCRC
+ ", crcPerBlock=" + crcPerBlock + ", crcType=" + crcType
+ ", md5=" + md5, e);
}
}
@Override
public String toString() {
return getAlgorithmName() + ":" + md5;
}
}
| 5,508 | 31.216374 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.ReflectionUtils;
import java.io.IOException;
/**
* This interface is used for implementing different Trash policies.
* Provides a factory method to create instances of the configured Trash policy.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class TrashPolicy extends Configured {
protected FileSystem fs; // the FileSystem
protected Path trash; // path to trash directory
protected long deletionInterval; // deletion interval for Emptier
/**
* Used to set up the trash policy. Must be implemented by all TrashPolicy
* implementations.
* @param conf the configuration to be used
* @param fs the filesystem to be used
* @param home the home directory
*/
public abstract void initialize(Configuration conf, FileSystem fs, Path home);
/**
* Returns whether the Trash Policy is enabled for this filesystem
*/
public abstract boolean isEnabled();
/**
* Move a file or directory to the current trash directory.
* @return false if the item is already in the trash or trash is disabled
*/
public abstract boolean moveToTrash(Path path) throws IOException;
/**
* Create a trash checkpoint.
*/
public abstract void createCheckpoint() throws IOException;
/**
* Delete old trash checkpoint(s).
*/
public abstract void deleteCheckpoint() throws IOException;
/**
* Get the current working directory of the Trash Policy
*/
public abstract Path getCurrentTrashDir();
/**
* Return a {@link Runnable} that periodically empties the trash of all
* users, intended to be run by the superuser.
*/
public abstract Runnable getEmptier() throws IOException;
/**
* Get an instance of the configured TrashPolicy based on the value
* of the configuration parameter fs.trash.classname.
*
* @param conf the configuration to be used
* @param fs the file system to be used
* @param home the home directory
* @return an instance of TrashPolicy
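*
* <p>A minimal usage sketch (the FileSystem <code>fs</code>, the home
* directory <code>home</code>, and the path below are illustrative, not
* part of this class):
* <pre>
* Configuration conf = new Configuration();
* TrashPolicy trash = TrashPolicy.getInstance(conf, fs, home);
* if (trash.isEnabled()) {
* trash.moveToTrash(new Path("/user/alice/tmp/scratch.txt"));
* }
* </pre>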
*/
public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home) {
Class<? extends TrashPolicy> trashClass = conf.getClass(
"fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
trash.initialize(conf, fs, home); // initialize TrashPolicy
return trash;
}
}
| 3438 | 34.453608 | 87 | java |
| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Stack;
import java.util.TreeSet;
import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RpcClientException;
import org.apache.hadoop.ipc.RpcServerException;
import org.apache.hadoop.ipc.UnexpectedServerException;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ShutdownHookManager;
import com.google.common.base.Preconditions;
/**
* The FileContext class provides an interface to the application writer for
* using the Hadoop file system.
* It provides a set of methods for the usual operations: create, open,
* list, etc.
*
* <p>
* <b> *** Path Names *** </b>
* <p>
*
* The Hadoop file system supports a URI name space and URI names.
* It offers a forest of file systems that can be referenced using fully
* qualified URIs.
* Two common Hadoop file system implementations are
* <ul>
* <li> the local file system: file:///path
* <li> the hdfs file system hdfs://nnAddress:nnPort/path
* </ul>
*
* While URI names are very flexible, they require knowing the name or address
* of the server. For convenience one often wants to access the default system
* in one's environment without knowing its name/address. This has an
* additional benefit that it allows one to change one's default fs
* (e.g. admin moves application from cluster1 to cluster2).
* <p>
*
* To facilitate this, Hadoop supports a notion of a default file system.
* The user can set his default file system, although this is
* typically set up for you in your environment via your default config.
* A default file system implies a default scheme and authority; slash-relative
* names (such as /foo/bar) are resolved relative to that default FS.
* Similarly a user can also have working-directory-relative names (i.e. names
* not starting with a slash). While the working directory is generally in the
* same default FS, the wd can be in a different FS.
* <p>
* Hence Hadoop path names can be one of:
* <ul>
* <li> fully qualified URI: scheme://authority/path
* <li> slash relative names: /path relative to the default file system
* <li> wd-relative names: path relative to the working dir
* </ul>
* Relative paths with scheme (scheme:foo/bar) are illegal.
*
* <p>
* <b>****The Role of the FileContext and configuration defaults****</b>
* <p>
* The FileContext provides file namespace context for resolving file names;
* it also contains the umask for permissions. In that sense it is like the
* per-process file-related state in a Unix system.
* These two properties
* <ul>
* <li> default file system (i.e. your slash)
* <li> umask
* </ul>
* in general, are obtained from the default configuration file
* in your environment (see {@link Configuration}).
*
* No other configuration parameters are obtained from the default config as
* far as the file context layer is concerned. All file system instances
* (i.e. deployments of file systems) have default properties; we call these
* server side (SS) defaults. Operations like create allow one to select many
* properties: either pass them in as explicit parameters or use
* the SS properties.
* <p>
* The file system related SS defaults are
* <ul>
* <li> the home directory (default is "/user/userName")
* <li> the initial wd (only for local fs)
* <li> replication factor
* <li> block size
* <li> buffer size
* <li> encryptDataTransfer
* <li> checksum option. (checksumType and bytesPerChecksum)
* </ul>
*
* <p>
* <b> *** Usage Model for the FileContext class *** </b>
* <p>
* Example 1: use the default config read from the $HADOOP_CONFIG/core.xml.
* Unspecified values come from core-defaults.xml in the release jar.
* <ul>
* <li> myFContext = FileContext.getFileContext(); // uses the default config
* // which has your default FS
* <li> myFContext.create(path, ...);
* <li> myFContext.setWorkingDir(path)
* <li> myFContext.open (path, ...);
* </ul>
* Example 2: Get a FileContext with a specific URI as the default FS
* <ul>
* <li> myFContext = FileContext.getFileContext(URI)
* <li> myFContext.create(path, ...);
* ...
* </ul>
* Example 3: FileContext with local file system as the default
* <ul>
* <li> myFContext = FileContext.getLocalFSFileContext()
* <li> myFContext.create(path, ...);
* <li> ...
* </ul>
* Example 4: Use a specific config, ignoring $HADOOP_CONFIG.
* Generally you should not need to use a config unless you are doing the following:
* <ul>
* <li> configX = someConfigSomeOnePassedToYou.
* <li> myFContext = getFileContext(configX); // configX is not changed,
* // is passed down
* <li> myFContext.create(path, ...);
* <li>...
* </ul>
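*
* A compilable sketch of Example 1 (the paths below are illustrative):
* <pre>
* FileContext fc = FileContext.getFileContext(); // default config
* fc.setWorkingDirectory(new Path("/user/alice"));
* FSDataOutputStream out = fc.create(new Path("report.txt"),
* EnumSet.of(CreateFlag.CREATE));
* out.writeUTF("hello");
* out.close();
* FSDataInputStream in = fc.open(new Path("report.txt"));
* </pre>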
*
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */
public class FileContext {
public static final Log LOG = LogFactory.getLog(FileContext.class);
/**
* Default permission for directory and symlink
* In previous versions, this default permission was also used to
* create files, so files created end up with ugo+x permission.
* See HADOOP-9155 for detail.
* Two new constants are added to solve this, please use
* {@link FileContext#DIR_DEFAULT_PERM} for directory, and use
* {@link FileContext#FILE_DEFAULT_PERM} for file.
* This constant is kept for compatibility.
*/
public static final FsPermission DEFAULT_PERM = FsPermission.getDefault();
/**
* Default permission for directory
*/
public static final FsPermission DIR_DEFAULT_PERM = FsPermission.getDirDefault();
/**
* Default permission for file
*/
public static final FsPermission FILE_DEFAULT_PERM = FsPermission.getFileDefault();
/**
* Priority of the FileContext shutdown hook.
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 20;
/**
* List of files that should be deleted on JVM shutdown.
*/
static final Map<FileContext, Set<Path>> DELETE_ON_EXIT =
new IdentityHashMap<FileContext, Set<Path>>();
/** JVM shutdown hook thread. */
static final FileContextFinalizer FINALIZER =
new FileContextFinalizer();
private static final PathFilter DEFAULT_FILTER = new PathFilter() {
@Override
public boolean accept(final Path file) {
return true;
}
};
/**
* The FileContext is defined by:
* 1) defaultFS (slash)
* 2) wd
* 3) umask
*/
private final AbstractFileSystem defaultFS; //default FS for this FileContext.
private Path workingDir; // Fully qualified
private FsPermission umask;
private final Configuration conf;
private final UserGroupInformation ugi;
final boolean resolveSymlinks;
private FileContext(final AbstractFileSystem defFs,
final FsPermission theUmask, final Configuration aConf) {
defaultFS = defFs;
umask = FsPermission.getUMask(aConf);
conf = aConf;
try {
ugi = UserGroupInformation.getCurrentUser();
} catch (IOException e) {
LOG.error("Exception in getCurrentUser: ",e);
throw new RuntimeException("Failed to get the current user " +
"while creating a FileContext", e);
}
/*
* Init the wd.
* WorkingDir is implemented at the FileContext layer
* NOT at the AbstractFileSystem layer.
* If the DefaultFS, such as localFilesystem has a notion of
* builtin WD, we use that as the initial WD.
* Otherwise the WD is initialized to the home directory.
*/
workingDir = defaultFS.getInitialWorkingDirectory();
if (workingDir == null) {
workingDir = defaultFS.getHomeDirectory();
}
resolveSymlinks = conf.getBoolean(
CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY,
CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_DEFAULT);
util = new Util(); // for the inner class
}
/*
* Remove relative part - return "absolute":
* If input is relative path ("foo/bar") add wd: ie "/<workingDir>/foo/bar"
* A fully qualified uri ("hdfs://nn:p/foo/bar") or a slash-relative path
* ("/foo/bar") are returned unchanged.
*
* Applications that use FileContext should use #makeQualified() since
* they really want a fully qualified URI.
* Hence this method is not called makeAbsolute() and
* has been deliberately declared package-private.
*/
Path fixRelativePart(Path p) {
Preconditions.checkNotNull(p, "path cannot be null");
if (p.isUriPathAbsolute()) {
return p;
} else {
return new Path(workingDir, p);
}
}
/**
* Delete all the paths that were marked as delete-on-exit.
*/
static void processDeleteOnExit() {
synchronized (DELETE_ON_EXIT) {
Set<Entry<FileContext, Set<Path>>> set = DELETE_ON_EXIT.entrySet();
for (Entry<FileContext, Set<Path>> entry : set) {
FileContext fc = entry.getKey();
Set<Path> paths = entry.getValue();
for (Path path : paths) {
try {
fc.delete(path, true);
} catch (IOException e) {
LOG.warn("Ignoring failure to deleteOnExit for path " + path);
}
}
}
DELETE_ON_EXIT.clear();
}
}
/**
* Get the file system of supplied path.
*
* @param absOrFqPath - absolute or fully qualified path
* @return the file system of the path
*
* @throws UnsupportedFileSystemException If the file system for
* <code>absOrFqPath</code> is not supported.
* @throws IOException If the file system for <code>absOrFqPath</code> could
* not be instantiated.
*/
protected AbstractFileSystem getFSofPath(final Path absOrFqPath)
throws UnsupportedFileSystemException, IOException {
absOrFqPath.checkNotSchemeWithRelative();
absOrFqPath.checkNotRelative();
try {
// Is it the default FS for this FileContext?
defaultFS.checkPath(absOrFqPath);
return defaultFS;
} catch (Exception e) { // it is different FileSystem
return getAbstractFileSystem(ugi, absOrFqPath.toUri(), conf);
}
}
private static AbstractFileSystem getAbstractFileSystem(
UserGroupInformation user, final URI uri, final Configuration conf)
throws UnsupportedFileSystemException, IOException {
try {
return user.doAs(new PrivilegedExceptionAction<AbstractFileSystem>() {
@Override
public AbstractFileSystem run() throws UnsupportedFileSystemException {
return AbstractFileSystem.get(uri, conf);
}
});
} catch (InterruptedException ex) {
LOG.error(ex);
throw new IOException("Failed to get the AbstractFileSystem for path: "
+ uri, ex);
}
}
/**
* Protected static factory methods for getting a FileContext
* that take an AbstractFileSystem as input. To be used for testing.
*/
/**
* Create a FileContext with specified FS as default using the specified
* config.
*
* @param defFS
* @param aConf
* @return new FileContext with specified FS as default.
*/
public static FileContext getFileContext(final AbstractFileSystem defFS,
final Configuration aConf) {
return new FileContext(defFS, FsPermission.getUMask(aConf), aConf);
}
/**
* Create a FileContext for specified file system using the default config.
*
* @param defaultFS
* @return a FileContext with the specified AbstractFileSystem
* as the default FS.
*/
protected static FileContext getFileContext(
final AbstractFileSystem defaultFS) {
return getFileContext(defaultFS, new Configuration());
}
/**
* Static Factory methods for getting a FileContext.
* Note new file contexts are created for each call.
* The only singleton is the local FS context using the default config.
*
* Methods that use the default config: the default config read from the
* $HADOOP_CONFIG/core.xml,
* Unspecified key-values for config are defaulted from core-defaults.xml
* in the release jar.
*
* The keys relevant to the FileContext layer are extracted at time of
* construction. Changes to the config after the call are ignored
* by the FileContext layer.
* The conf is passed to lower layers like AbstractFileSystem and HDFS which
* pick up their own config variables.
*/
/**
* Create a FileContext using the default config read from the
* $HADOOP_CONFIG/core.xml, Unspecified key-values for config are defaulted
* from core-defaults.xml in the release jar.
*
* @throws UnsupportedFileSystemException If the file system from the default
* configuration is not supported
*/
public static FileContext getFileContext()
throws UnsupportedFileSystemException {
return getFileContext(new Configuration());
}
/**
* @return a FileContext for the local file system using the default config.
* @throws UnsupportedFileSystemException If the file system for
* {@link FsConstants#LOCAL_FS_URI} is not supported.
*/
public static FileContext getLocalFSFileContext()
throws UnsupportedFileSystemException {
return getFileContext(FsConstants.LOCAL_FS_URI);
}
/**
* Create a FileContext for specified URI using the default config.
*
* @param defaultFsUri
* @return a FileContext with the specified URI as the default FS.
*
* @throws UnsupportedFileSystemException If the file system for
* <code>defaultFsUri</code> is not supported
*/
public static FileContext getFileContext(final URI defaultFsUri)
throws UnsupportedFileSystemException {
return getFileContext(defaultFsUri, new Configuration());
}
/**
* Create a FileContext for specified default URI using the specified config.
*
* @param defaultFsUri
* @param aConf
* @return new FileContext for specified uri
* @throws UnsupportedFileSystemException If the file system for the specified
* URI is not supported
* @throws RuntimeException If the file system specified is supported but
* could not be instantiated, or if login fails.
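*
* A minimal sketch (the name node address is illustrative):
* <pre>
* Configuration conf = new Configuration();
* FileContext fc =
* FileContext.getFileContext(URI.create("hdfs://nn1:8020"), conf);
* </pre>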
*/
public static FileContext getFileContext(final URI defaultFsUri,
final Configuration aConf) throws UnsupportedFileSystemException {
UserGroupInformation currentUser = null;
AbstractFileSystem defaultAfs = null;
if (defaultFsUri.getScheme() == null) {
return getFileContext(aConf);
}
try {
currentUser = UserGroupInformation.getCurrentUser();
defaultAfs = getAbstractFileSystem(currentUser, defaultFsUri, aConf);
} catch (UnsupportedFileSystemException ex) {
throw ex;
} catch (IOException ex) {
LOG.error(ex);
throw new RuntimeException(ex);
}
return getFileContext(defaultAfs, aConf);
}
/**
* Create a FileContext using the passed config. Generally it is better to use
* {@link #getFileContext(URI, Configuration)} instead of this one.
*
*
* @param aConf
* @return new FileContext
* @throws UnsupportedFileSystemException If file system in the config
* is not supported
*/
public static FileContext getFileContext(final Configuration aConf)
throws UnsupportedFileSystemException {
final URI defaultFsUri = URI.create(aConf.get(FS_DEFAULT_NAME_KEY,
FS_DEFAULT_NAME_DEFAULT));
if ( defaultFsUri.getScheme() != null
&& !defaultFsUri.getScheme().trim().isEmpty()) {
return getFileContext(defaultFsUri, aConf);
}
throw new UnsupportedFileSystemException(String.format(
"%s: URI configured via %s carries no scheme",
defaultFsUri, FS_DEFAULT_NAME_KEY));
}
/**
* @param aConf - from which the FileContext is configured
* @return a FileContext for the local file system using the specified config.
*
* @throws UnsupportedFileSystemException If default file system in the config
* is not supported
*
*/
public static FileContext getLocalFSFileContext(final Configuration aConf)
throws UnsupportedFileSystemException {
return getFileContext(FsConstants.LOCAL_FS_URI, aConf);
}
/* This method is needed for tests. */
@InterfaceAudience.Private
@InterfaceStability.Unstable /* return type will change to AFS once
HADOOP-6223 is completed */
public AbstractFileSystem getDefaultFileSystem() {
return defaultFS;
}
/**
* Set the working directory for wd-relative names (such as "foo/bar"). Working
* directory feature is provided by simply prefixing relative names with the
* working dir. Note this is different from Unix where the wd is actually set
* to the inode. Hence setWorkingDir does not follow symlinks etc. This works
* better in a distributed environment that has multiple independent roots.
* {@link #getWorkingDirectory()} should return what setWorkingDir() set.
*
* @param newWDir new working directory
* @throws IOException
* <br>
* NewWdir can be one of:
* <ul>
* <li>relative path: "foo/bar";</li>
* <li>absolute without scheme: "/foo/bar"</li>
* <li>fully qualified with scheme: "xx://auth/foo/bar"</li>
* </ul>
* <br>
* Illegal WDs:
* <ul>
* <li>relative with scheme: "xx:foo/bar"</li>
* <li>non existent directory</li>
* </ul>
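* <br>
* A small sketch (paths are illustrative):
* <pre>
* fc.setWorkingDirectory(new Path("/user/alice"));
* Path q = fc.makeQualified(new Path("data/part-0")); // resolved under the new wd
* </pre>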
*/
public void setWorkingDirectory(final Path newWDir) throws IOException {
newWDir.checkNotSchemeWithRelative();
/* wd is stored as a fully qualified path. We check if the given
* path is not relative first since resolve requires and returns
* an absolute path.
*/
final Path newWorkingDir = new Path(workingDir, newWDir);
FileStatus status = getFileStatus(newWorkingDir);
if (status.isFile()) {
throw new FileNotFoundException("Cannot setWD to a file");
}
workingDir = newWorkingDir;
}
/**
* Gets the working directory for wd-relative names (such as "foo/bar").
*/
public Path getWorkingDirectory() {
return workingDir;
}
/**
* Gets the ugi in the file-context
* @return UserGroupInformation
*/
public UserGroupInformation getUgi() {
return ugi;
}
/**
* Return the current user's home directory in this file system.
* The default implementation returns "/user/$USER/".
* @return the home directory
*/
public Path getHomeDirectory() {
return defaultFS.getHomeDirectory();
}
/**
*
* @return the umask of this FileContext
*/
public FsPermission getUMask() {
return umask;
}
/**
* Set umask to the supplied parameter.
* @param newUmask the new umask
*/
public void setUMask(final FsPermission newUmask) {
umask = newUmask;
}
/**
* Resolve the path following any symlinks or mount points
* @param f to be resolved
* @return fully qualified resolved path
*
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws AccessControlException if access denied
* @throws IOException If an IO Error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*
* RuntimeExceptions:
* @throws InvalidPathException If path <code>f</code> is not valid
*/
public Path resolvePath(final Path f) throws FileNotFoundException,
UnresolvedLinkException, AccessControlException, IOException {
return resolve(f);
}
/**
* Make the path fully qualified if it isn't.
* A Fully-qualified path has scheme and authority specified and an absolute
* path.
* Use the default file system and working dir in this FileContext to qualify.
* @param path
* @return qualified path
*/
public Path makeQualified(final Path path) {
return path.makeQualified(defaultFS.getUri(), getWorkingDirectory());
}
/**
* Create or overwrite a file at the indicated path, and return an output
* stream for writing into the file.
*
* @param f the file name to open
* @param createFlag gives the semantics of create; see {@link CreateFlag}
* @param opts file creation options; see {@link Options.CreateOpts}.
* <ul>
* <li>Progress - to report progress on the operation - default null
* <li>Permission - umask is applied against permission: default is
* FsPermission.getDefault()
*
* <li>CreateParent - create missing parent path; default is not
* to create parents
* <li>The defaults for the following are SS defaults of the file
* server implementing the target path. Not all parameters make sense
* for all kinds of file system - eg. localFS ignores Blocksize,
* replication, checksum
* <ul>
* <li>BufferSize - buffersize used in FSDataOutputStream
* <li>Blocksize - block size for file blocks
* <li>ReplicationFactor - replication for blocks
* <li>ChecksumParam - Checksum parameters. server default is used
* if not specified.
* </ul>
* </ul>
*
* @return {@link FSDataOutputStream} for created file
*
* @throws AccessControlException If access is denied
* @throws FileAlreadyExistsException If file <code>f</code> already exists
* @throws FileNotFoundException If parent of <code>f</code> does not exist
* and <code>createParent</code> is false
* @throws ParentNotDirectoryException If parent of <code>f</code> is not a
* directory.
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*
* RuntimeExceptions:
* @throws InvalidPathException If path <code>f</code> is not valid
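*
* A hedged sketch of a typical call (the path and options below are
* illustrative):
* <pre>
* FSDataOutputStream out = fc.create(new Path("/tmp/demo.txt"),
* EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
* CreateOpts.createParent());
* out.writeUTF("hello");
* out.close();
* </pre>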
*/
public FSDataOutputStream create(final Path f,
final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
throws AccessControlException, FileAlreadyExistsException,
FileNotFoundException, ParentNotDirectoryException,
UnsupportedFileSystemException, IOException {
Path absF = fixRelativePart(f);
// If one of the options is a permission, extract it & apply umask
// If not, add a default Perms and apply umask;
// AbstractFileSystem#create
CreateOpts.Perms permOpt = CreateOpts.getOpt(CreateOpts.Perms.class, opts);
FsPermission permission = (permOpt != null) ? permOpt.getValue() :
FILE_DEFAULT_PERM;
permission = permission.applyUMask(umask);
final CreateOpts[] updatedOpts =
CreateOpts.setOpt(CreateOpts.perms(permission), opts);
return new FSLinkResolver<FSDataOutputStream>() {
@Override
public FSDataOutputStream next(final AbstractFileSystem fs, final Path p)
throws IOException {
return fs.create(p, createFlag, updatedOpts);
}
}.resolve(this, absF);
}
/**
* Make(create) a directory and all the non-existent parents.
*
* @param dir - the dir to make
* @param permission - the permission actually set is permission&~umask
* @param createParent - if true then missing parent dirs are created if false
* then parent must exist
*
* @throws AccessControlException If access is denied
* @throws FileAlreadyExistsException If directory <code>dir</code> already
* exists
* @throws FileNotFoundException If parent of <code>dir</code> does not exist
* and <code>createParent</code> is false
* @throws ParentNotDirectoryException If parent of <code>dir</code> is not a
* directory
* @throws UnsupportedFileSystemException If file system for <code>dir</code>
* is not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*
* RuntimeExceptions:
* @throws InvalidPathException If path <code>dir</code> is not valid
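*
* For example (the path below is illustrative):
* <pre>
* fc.mkdir(new Path("/tmp/a/b"), FsPermission.getDirDefault(), true);
* </pre>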
*/
public void mkdir(final Path dir, final FsPermission permission,
final boolean createParent) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnsupportedFileSystemException,
IOException {
final Path absDir = fixRelativePart(dir);
final FsPermission absPerms = (permission == null ?
FsPermission.getDirDefault() : permission).applyUMask(umask);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.mkdir(p, absPerms, createParent);
return null;
}
}.resolve(this, absDir);
}
/**
* Delete a file.
* @param f the path to delete.
* @param recursive if path is a directory and set to
* true, the directory is deleted, else an exception is thrown. In
* case of a file, recursive can be set to either true or false.
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*
* RuntimeExceptions:
* @throws InvalidPathException If path <code>f</code> is invalid
*/
public boolean delete(final Path f, final boolean recursive)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
Path absF = fixRelativePart(f);
return new FSLinkResolver<Boolean>() {
@Override
public Boolean next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return Boolean.valueOf(fs.delete(p, recursive));
}
}.resolve(this, absF);
}
/**
* Opens an FSDataInputStream at the indicated Path using
* default buffersize.
* @param f the file name to open
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If file <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code>
* is not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public FSDataInputStream open(final Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FSDataInputStream>() {
@Override
public FSDataInputStream next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.open(p);
}
}.resolve(this, absF);
}
/**
* Opens an FSDataInputStream at the indicated Path.
*
* @param f the file name to open
* @param bufferSize the size of the buffer to be used.
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If file <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public FSDataInputStream open(final Path f, final int bufferSize)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FSDataInputStream>() {
@Override
public FSDataInputStream next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.open(p, bufferSize);
}
}.resolve(this, absF);
}
/**
* Truncate the file in the indicated path to the indicated size.
* <ul>
* <li>Fails if path is a directory.
* <li>Fails if path does not exist.
* <li>Fails if path is not closed.
* <li>Fails if new size is greater than current size.
* </ul>
* @param f The path to the file to be truncated
* @param newLength The size the file is to be truncated to
*
* @return <code>true</code> if the file has been truncated to the desired
* <code>newLength</code> and is immediately available to be reused for
* write operations such as <code>append</code>, or
* <code>false</code> if a background process of adjusting the length of
* the last block has been started, and clients should wait for it to
* complete before proceeding with further file updates.
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If file <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public boolean truncate(final Path f, final long newLength)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<Boolean>() {
@Override
public Boolean next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.truncate(p, newLength);
}
}.resolve(this, absF);
}
/**
* Set replication for an existing file.
*
* @param f file name
* @param replication new replication
*
* @return true if successful
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If file <code>f</code> does not exist
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public boolean setReplication(final Path f, final short replication)
throws AccessControlException, FileNotFoundException,
IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<Boolean>() {
@Override
public Boolean next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return Boolean.valueOf(fs.setReplication(p, replication));
}
}.resolve(this, absF);
}
/**
* Renames Path src to Path dst
* <ul>
* <li>Fails if src is a file and dst is a directory.
* <li>Fails if src is a directory and dst is a file.
* <li>Fails if the parent of dst does not exist or is a file.
* </ul>
* <p>
* If OVERWRITE option is not passed as an argument, rename fails if the dst
* already exists.
* <p>
* If OVERWRITE option is passed as an argument, rename overwrites the dst if
* it is a file or an empty directory. Rename fails if dst is a non-empty
* directory.
* <p>
* Note that atomicity of rename is dependent on the file system
* implementation. Please refer to the file system documentation for details
* <p>
*
* @param src path to be renamed
* @param dst new path after rename
*
* @throws AccessControlException If access is denied
* @throws FileAlreadyExistsException If <code>dst</code> already exists and
* <code>options</code> has {@link Options.Rename#OVERWRITE}
* option false.
* @throws FileNotFoundException If <code>src</code> does not exist
* @throws ParentNotDirectoryException If parent of <code>dst</code> is not a
* directory
* @throws UnsupportedFileSystemException If file system for <code>src</code>
* and <code>dst</code> is not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
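*
* For example, an overwriting rename (paths are illustrative):
* <pre>
* fc.rename(new Path("/tmp/src.txt"), new Path("/tmp/dst.txt"),
* Options.Rename.OVERWRITE);
* </pre>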
*/
public void rename(final Path src, final Path dst,
final Options.Rename... options) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnsupportedFileSystemException,
IOException {
final Path absSrc = fixRelativePart(src);
final Path absDst = fixRelativePart(dst);
AbstractFileSystem srcFS = getFSofPath(absSrc);
AbstractFileSystem dstFS = getFSofPath(absDst);
if(!srcFS.getUri().equals(dstFS.getUri())) {
throw new IOException("Renames across AbstractFileSystems not supported");
}
try {
srcFS.rename(absSrc, absDst, options);
} catch (UnresolvedLinkException e) {
/* We do not know whether the source or the destination path
* was unresolved. Resolve the source path up until the final
* path component, then fully resolve the destination.
*/
final Path source = resolveIntermediate(absSrc);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.rename(source, p, options);
return null;
}
}.resolve(this, absDst);
}
}
/**
* Set permission of a path.
* @param f
* @param permission - the new absolute permission (umask is not applied)
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code>
* is not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public void setPermission(final Path f, final FsPermission permission)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.setPermission(p, permission);
return null;
}
}.resolve(this, absF);
}
/**
* Set owner of a path (i.e. a file or a directory). The parameters username
* and groupname cannot both be null.
*
* @param f The path
* @param username If it is null, the original username remains unchanged.
* @param groupname If it is null, the original groupname remains unchanged.
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*
* RuntimeExceptions:
* @throws HadoopIllegalArgumentException If <code>username</code> or
* <code>groupname</code> is invalid.
*/
public void setOwner(final Path f, final String username,
final String groupname) throws AccessControlException,
UnsupportedFileSystemException, FileNotFoundException,
IOException {
if ((username == null) && (groupname == null)) {
throw new HadoopIllegalArgumentException(
"username and groupname cannot both be null");
}
final Path absF = fixRelativePart(f);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.setOwner(p, username, groupname);
return null;
}
}.resolve(this, absF);
}
/**
* Set access time of a file.
* @param f The path
* @param mtime Set the modification time of this file.
* The number of milliseconds since epoch (Jan 1, 1970).
* A value of -1 means that this call should not set modification time.
* @param atime Set the access time of this file.
* The number of milliseconds since Jan 1, 1970.
* A value of -1 means that this call should not set access time.
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public void setTimes(final Path f, final long mtime, final long atime)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.setTimes(p, mtime, atime);
return null;
}
}.resolve(this, absF);
}
/**
* Get the checksum of a file.
*
* @param f file path
*
* @return The file checksum. The default return value is null,
* which indicates that no checksum algorithm is implemented
* in the corresponding FileSystem.
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
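*
* A small sketch (the path is illustrative; a null return means the
* file system implements no checksum):
* <pre>
* FileChecksum sum = fc.getFileChecksum(new Path("/tmp/demo.txt"));
* if (sum != null) {
* System.out.println(sum.getAlgorithmName() + ": " + sum);
* }
* </pre>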
*/
public FileChecksum getFileChecksum(final Path f)
throws AccessControlException, FileNotFoundException,
IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FileChecksum>() {
@Override
public FileChecksum next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFileChecksum(p);
}
}.resolve(this, absF);
}
/**
* Set the verify checksum flag for the file system denoted by the path.
* This is only applicable if the
* corresponding FileSystem supports checksums. By default this does nothing.
* @param verifyChecksum
* @param f set the verifyChecksum for the Filesystem containing this path
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public void setVerifyChecksum(final boolean verifyChecksum, final Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
final Path absF = resolve(fixRelativePart(f));
getFSofPath(absF).setVerifyChecksum(verifyChecksum);
}
/**
* Return a file status object that represents the path.
* @param f The path we want information from
*
* @return a FileStatus object
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public FileStatus getFileStatus(final Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FileStatus>() {
@Override
public FileStatus next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFileStatus(p);
}
}.resolve(this, absF);
}
/**
* Checks if the user can access a path. The mode specifies which access
* checks to perform. If the requested permissions are granted, then the
* method returns normally. If access is denied, then the method throws an
* {@link AccessControlException}.
* <p/>
* The default implementation of this method calls {@link #getFileStatus(Path)}
* and checks the returned permissions against the requested permissions.
* Note that the getFileStatus call will be subject to authorization checks.
* Typically, this requires search (execute) permissions on each directory in
* the path's prefix, but this is implementation-defined. Any file system
* that provides a richer authorization model (such as ACLs) may override the
* default implementation so that it checks against that model instead.
* <p>
* In general, applications should avoid using this method, due to the risk of
* time-of-check/time-of-use race conditions. The permissions on a file may
* change immediately after the access call returns. Most applications should
* prefer running specific file system actions as the desired user represented
* by a {@link UserGroupInformation}.
*
* @param path Path to check
* @param mode type of access to check
* @throws AccessControlException if access is denied
* @throws FileNotFoundException if the path does not exist
* @throws UnsupportedFileSystemException if file system for <code>path</code>
* is not supported
* @throws IOException see specific implementation
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
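*
* A minimal sketch (the path is illustrative):
* <pre>
* fc.access(new Path("/tmp/demo.txt"), FsAction.READ); // throws if denied
* </pre>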
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
public void access(final Path path, final FsAction mode)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
final Path absPath = fixRelativePart(path);
new FSLinkResolver<Void>() {
@Override
public Void next(AbstractFileSystem fs, Path p) throws IOException,
UnresolvedLinkException {
fs.access(p, mode);
return null;
}
}.resolve(this, absPath);
}
/**
* Return a file status object that represents the path. If the path
* refers to a symlink then the FileStatus of the symlink is returned.
* The behavior is equivalent to #getFileStatus() if the underlying
* file system does not support symbolic links.
* @param f The path we want information from.
* @return A FileStatus object
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*/
public FileStatus getFileLinkStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FileStatus>() {
@Override
public FileStatus next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
FileStatus fi = fs.getFileLinkStatus(p);
if (fi.isSymlink()) {
fi.setSymlink(FSLinkResolver.qualifySymlinkTarget(fs.getUri(), p,
fi.getSymlink()));
}
return fi;
}
}.resolve(this, absF);
}
/**
* Returns the target of the given symbolic link as it was specified
* when the link was created. Links in the path leading up to the
* final path component are resolved transparently.
*
* @param f the path to return the target of
* @return The un-interpreted target of the symbolic link.
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If path <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If the given path does not refer to a symlink
* or an I/O error occurred
*/
public Path getLinkTarget(final Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<Path>() {
@Override
public Path next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
FileStatus fi = fs.getFileLinkStatus(p);
return fi.getSymlink();
}
}.resolve(this, absF);
}
/**
* Return the block locations of the given file for the given offset and len.
* For a nonexistent file or regions, null will be returned.
*
* This call is most helpful with DFS, where it returns
* hostnames of machines that contain the given file.
*
* @param f - get blocklocations of this file
* @param start position (byte offset)
* @param len (in bytes)
*
* @return block locations for given file at specified offset of len
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*
* RuntimeExceptions:
* @throws InvalidPathException If path <code>f</code> is invalid
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public BlockLocation[] getFileBlockLocations(final Path f, final long start,
final long len) throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<BlockLocation[]>() {
@Override
public BlockLocation[] next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFileBlockLocations(p, start, len);
}
}.resolve(this, absF);
}
/**
* Returns a status object describing the use and capacity of the
* file system denoted by the Path argument f.
* If the file system has multiple partitions, the
* use and capacity of the partition pointed to by the specified
* path is reflected.
*
* @param f Path for which status should be obtained. null means the
* root partition of the default file system.
*
* @return a FsStatus object
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public FsStatus getFsStatus(final Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException, IOException {
if (f == null) {
return defaultFS.getFsStatus();
}
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FsStatus>() {
@Override
public FsStatus next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFsStatus(p);
}
}.resolve(this, absF);
}
/**
* Creates a symbolic link to an existing file. An exception is thrown if
* the symlink exists, the user does not have permission to create the symlink,
* or the underlying file system does not support symlinks.
*
* Symlink permissions are ignored, access to a symlink is determined by
* the permissions of the symlink target.
*
* Symlinks in paths leading up to the final path component are resolved
* transparently. If the final path component refers to a symlink some
* functions operate on the symlink itself, these are:
* - delete(f) and deleteOnExit(f) - Deletes the symlink.
* - rename(src, dst) - If src refers to a symlink, the symlink is
* renamed. If dst refers to a symlink, the symlink is over-written.
* - getLinkTarget(f) - Returns the target of the symlink.
* - getFileLinkStatus(f) - Returns a FileStatus object describing
* the symlink.
* Some functions, create() and mkdir(), expect the final path component
* does not exist. If they are given a path that refers to a symlink that
* does exist they behave as if the path referred to an existing file or
* directory. All other functions fully resolve, i.e. follow, the symlink.
* These are: open, setReplication, setOwner, setTimes, setWorkingDirectory,
* setPermission, getFileChecksum, setVerifyChecksum, getFileBlockLocations,
* getFsStatus, getFileStatus, exists, and listStatus.
*
* Symlink targets are stored as given to createSymlink, assuming the
* underlying file system is capable of storing a fully qualified URI.
* Dangling symlinks are permitted. FileContext supports four types of
* symlink targets, and resolves them as follows
* <pre>
* Given a path referring to a symlink of form:
*
* <---X--->
* fs://host/A/B/link
* <-----Y----->
*
* In this path X is the scheme and authority that identify the file system,
* and Y is the path leading up to the final path component "link". If Y is
* a symlink itself then let Y' be the target of Y and X' be the scheme and
* authority of Y'. Symlink targets may be:
*
* 1. Fully qualified URIs
*
* fs://hostX/A/B/file Resolved according to the target file system.
*
* 2. Partially qualified URIs (eg scheme but no host)
*
* fs:///A/B/file Resolved according to the target file system. Eg resolving
* a symlink to hdfs:///A results in an exception because
* HDFS URIs must be fully qualified, while a symlink to
* file:///A will not since Hadoop's local file systems
* require partially qualified URIs.
*
* 3. Relative paths
*
* path Resolves to [Y'][path]. Eg if Y resolves to hdfs://host/A and path
* is "../B/file" then [Y'][path] is hdfs://host/B/file
*
* 4. Absolute paths
*
* path Resolves to [X'][path]. Eg if Y resolves to hdfs://host/A/B and path
* is "/file" then [X'][path] is hdfs://host/file
* </pre>
*
* @param target the target of the symbolic link
* @param link the path to be created that points to target
* @param createParent if true then missing parent dirs are created if
* false then parent must exist
*
*
* @throws AccessControlException If access is denied
* @throws FileAlreadyExistsException If file <code>link</code> already exists
* @throws FileNotFoundException If <code>target</code> does not exist
* @throws ParentNotDirectoryException If parent of <code>link</code> is not a
* directory.
* @throws UnsupportedFileSystemException If file system for
* <code>target</code> or <code>link</code> is not supported
* @throws IOException If an I/O error occurred
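*
* For example (paths are illustrative; symlinks must be enabled first via
* the test-only {@link FileSystem#enableSymlinks()} switch checked by this
* method):
* <pre>
* FileSystem.enableSymlinks();
* fc.createSymlink(new Path("/tmp/target.txt"), new Path("/tmp/link"), false);
* </pre>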
*/
@SuppressWarnings("deprecation")
public void createSymlink(final Path target, final Path link,
final boolean createParent) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnsupportedFileSystemException,
IOException {
if (!FileSystem.areSymlinksEnabled()) {
throw new UnsupportedOperationException("Symlinks not supported");
}
final Path nonRelLink = fixRelativePart(link);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
fs.createSymlink(target, p, createParent);
return null;
}
}.resolve(this, nonRelLink);
}
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param f is the path
*
* @return an iterator that traverses statuses of the files/directories
* in the given path
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
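*
* Typical iteration (the path is illustrative):
* <pre>
* RemoteIterator&lt;FileStatus&gt; it = fc.listStatus(new Path("/tmp"));
* while (it.hasNext()) {
* FileStatus st = it.next();
* System.out.println(st.getPath());
* }
* </pre>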
*/
public RemoteIterator<FileStatus> listStatus(final Path f) throws
AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<RemoteIterator<FileStatus>>() {
@Override
public RemoteIterator<FileStatus> next(
final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.listStatusIterator(p);
}
}.resolve(this, absF);
}
/**
* @return an iterator over the corrupt files under the given path
* (may contain duplicates if a file has more than one corrupt block)
* @throws IOException
*/
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
throws IOException {
final Path absF = fixRelativePart(path);
return new FSLinkResolver<RemoteIterator<Path>>() {
@Override
public RemoteIterator<Path> next(final AbstractFileSystem fs,
final Path p)
throws IOException, UnresolvedLinkException {
return fs.listCorruptFileBlocks(p);
}
}.resolve(this, absF);
}
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
* Return the file's status and block locations if the path is a file.
*
* If a returned status is a file, it contains the file's block locations.
*
* @param f is the path
*
* @return an iterator that traverses statuses of the files/directories
* in the given path
* If any IO exception occurs (for example the input directory gets deleted
* while listing is being executed), next() or hasNext() of the returned
* iterator may throw a RuntimeException with the IO exception as the cause.
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public RemoteIterator<LocatedFileStatus> listLocatedStatus(
final Path f) throws
AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<RemoteIterator<LocatedFileStatus>>() {
@Override
public RemoteIterator<LocatedFileStatus> next(
final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.listLocatedStatus(p);
}
}.resolve(this, absF);
}
/**
* Mark a path to be deleted on JVM shutdown.
*
* @param f the existing path to delete.
*
* @return true if deleteOnExit is successful, otherwise false.
*
* @throws AccessControlException If access is denied
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public boolean deleteOnExit(Path f) throws AccessControlException,
IOException {
if (!this.util().exists(f)) {
return false;
}
synchronized (DELETE_ON_EXIT) {
if (DELETE_ON_EXIT.isEmpty()) {
ShutdownHookManager.get().addShutdownHook(FINALIZER, SHUTDOWN_HOOK_PRIORITY);
}
Set<Path> set = DELETE_ON_EXIT.get(this);
if (set == null) {
set = new TreeSet<Path>();
DELETE_ON_EXIT.put(this, set);
}
set.add(f);
}
return true;
}
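// Sketch: marking a hypothetical staging path for cleanup. The first call
// registers FINALIZER as a shutdown hook; processDeleteOnExit() then deletes
// every recorded path when the JVM exits.
//
//   fc.deleteOnExit(new Path("/tmp/job-staging"));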
private final Util util;
public Util util() {
return util;
}
/**
* Utility/library methods built over the basic FileContext methods.
* Since these are library functions, the operations are not atomic
* and some of them may partially complete if other threads are making
* changes to the same part of the name space.
*/
public class Util {
/**
* Does the file exist?
* Note: Avoid using this method if you already have FileStatus in hand.
* Instead, reuse the FileStatus.
* @param f the file or dir to be checked
*
* @throws AccessControlException If access is denied
* @throws IOException If an I/O error occurred
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public boolean exists(final Path f) throws AccessControlException,
UnsupportedFileSystemException, IOException {
try {
FileStatus fs = FileContext.this.getFileStatus(f);
assert fs != null;
return true;
} catch (FileNotFoundException e) {
return false;
}
}
/**
* Return the {@link ContentSummary} of path f.
* @param f path
*
* @return the {@link ContentSummary} of path f.
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for
* <code>f</code> is not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public ContentSummary getContentSummary(Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
FileStatus status = FileContext.this.getFileStatus(f);
if (status.isFile()) {
long length = status.getLen();
return new ContentSummary.Builder().length(length).
fileCount(1).directoryCount(0).spaceConsumed(length).
build();
}
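// accumulators: {total length, file count, directory count};
// the directory count starts at 1 to include this directory itself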
long[] summary = {0, 0, 1};
RemoteIterator<FileStatus> statusIterator =
FileContext.this.listStatus(f);
while(statusIterator.hasNext()) {
FileStatus s = statusIterator.next();
long length = s.getLen();
ContentSummary c = s.isDirectory() ? getContentSummary(s.getPath()) :
new ContentSummary.Builder().length(length).fileCount(1).
directoryCount(0).spaceConsumed(length).build();
summary[0] += c.getLength();
summary[1] += c.getFileCount();
summary[2] += c.getDirectoryCount();
}
return new ContentSummary.Builder().length(summary[0]).
fileCount(summary[1]).directoryCount(summary[2]).
spaceConsumed(summary[0]).build();
}
/**
* See {@link #listStatus(Path[], PathFilter)}
*/
public FileStatus[] listStatus(Path[] files) throws AccessControlException,
FileNotFoundException, IOException {
return listStatus(files, DEFAULT_FILTER);
}
/**
* Filter files/directories in the given path using the user-supplied path
* filter.
*
* @param f is the path name
* @param filter is the user-supplied path filter
*
* @return an array of FileStatus objects for the files under the given path
* after applying the filter
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for
* <code>f</code> is not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public FileStatus[] listStatus(Path f, PathFilter filter)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
ArrayList<FileStatus> results = new ArrayList<FileStatus>();
listStatus(results, f, filter);
return results.toArray(new FileStatus[results.size()]);
}
/**
* Filter files/directories in the given list of paths using user-supplied
* path filter.
*
* @param files is a list of paths
* @param filter is the filter
*
* @return a list of statuses for the files under the given paths after
* applying the filter
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If a file in <code>files</code> does not
* exist
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public FileStatus[] listStatus(Path[] files, PathFilter filter)
throws AccessControlException, FileNotFoundException, IOException {
ArrayList<FileStatus> results = new ArrayList<FileStatus>();
for (int i = 0; i < files.length; i++) {
listStatus(results, files[i], filter);
}
return results.toArray(new FileStatus[results.size()]);
}
/*
* Filter files/directories in the given path using the user-supplied path
* filter. Results are added to the given array <code>results</code>.
*/
private void listStatus(ArrayList<FileStatus> results, Path f,
PathFilter filter) throws AccessControlException,
FileNotFoundException, IOException {
FileStatus[] listing = listStatus(f);
if (listing != null) {
for (int i = 0; i < listing.length; i++) {
if (filter.accept(listing[i].getPath())) {
results.add(listing[i]);
}
}
}
}
/**
* List the statuses of the files/directories in the given path
* if the path is a directory.
*
* @param f is the path
*
* @return an array that contains statuses of the files/directories
* in the given path
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code> is
* not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public FileStatus[] listStatus(final Path f) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException,
IOException {
final Path absF = fixRelativePart(f);
return new FSLinkResolver<FileStatus[]>() {
@Override
public FileStatus[] next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.listStatus(p);
}
}.resolve(FileContext.this, absF);
}
/**
* List the statuses and block locations of the files in the given path.
*
* If the path is a directory,
* if recursive is false, returns files in the directory;
* if recursive is true, return files in the subtree rooted at the path.
* The subtree is traversed in the depth-first order.
* If the path is a file, return the file's status and block locations.
* Files across symbolic links are also returned.
*
* @param f is the path
* @param recursive if the subdirectories need to be traversed recursively
*
* @return an iterator that traverses statuses of the files
* If any IO exception occurs (for example a sub-directory gets deleted while
* listing is being executed), next() or hasNext() of the returned iterator
* may throw a RuntimeException with the IO exception as the cause.
*
* @throws AccessControlException If access is denied
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws UnsupportedFileSystemException If file system for <code>f</code>
* is not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public RemoteIterator<LocatedFileStatus> listFiles(
final Path f, final boolean recursive) throws AccessControlException,
FileNotFoundException, UnsupportedFileSystemException,
IOException {
return new RemoteIterator<LocatedFileStatus>() {
private Stack<RemoteIterator<LocatedFileStatus>> itors =
new Stack<RemoteIterator<LocatedFileStatus>>();
RemoteIterator<LocatedFileStatus> curItor = listLocatedStatus(f);
LocatedFileStatus curFile;
/**
* Returns <tt>true</tt> if the iterator has more files.
*
* @return <tt>true</tt> if the iterator has more files.
* @throws AccessControlException if not allowed to access next
* file's status or locations
* @throws FileNotFoundException if next file does not exist any more
* @throws UnsupportedFileSystemException if next file's
* fs is unsupported
* @throws IOException for all other IO errors
* for example, the NameNode is not available or
* NameNode throws IOException due to an error
* while getting the status or block locations
*/
@Override
public boolean hasNext() throws IOException {
while (curFile == null) {
if (curItor.hasNext()) {
handleFileStat(curItor.next());
} else if (!itors.empty()) {
curItor = itors.pop();
} else {
return false;
}
}
return true;
}
/**
* Process the input stat.
* If it is a file, return the file stat.
* If it is a directory, traverse the directory if recursive is true;
* ignore it if recursive is false.
* If it is a symlink, resolve the symlink first and then process it
* depending on if it is a file or directory.
* @param stat input status
* @throws AccessControlException if access is denied
* @throws FileNotFoundException if file is not found
* @throws UnsupportedFileSystemException if fs is not supported
* @throws IOException for all other IO errors
*/
private void handleFileStat(LocatedFileStatus stat)
throws IOException {
if (stat.isFile()) { // file
curFile = stat;
} else if (stat.isSymlink()) { // symbolic link
// resolve symbolic link
FileStatus symstat = FileContext.this.getFileStatus(
stat.getSymlink());
if (symstat.isFile() || (recursive && symstat.isDirectory())) {
itors.push(curItor);
curItor = listLocatedStatus(stat.getPath());
}
} else if (recursive) { // directory
itors.push(curItor);
curItor = listLocatedStatus(stat.getPath());
}
}
/**
* Returns the next file's status with its block locations
*
* @throws AccessControlException if not allowed to access next
* file's status or locations
* @throws FileNotFoundException if next file does not exist any more
* @throws UnsupportedFileSystemException if next file's
* fs is unsupported
* @throws IOException for all other IO errors
* for example, the NameNode is not available or
* NameNode throws IOException due to an error
* while getting the status or block locations
*/
@Override
public LocatedFileStatus next() throws IOException {
if (hasNext()) {
LocatedFileStatus result = curFile;
curFile = null;
return result;
}
throw new java.util.NoSuchElementException("No more entries in " + f);
}
};
}
/**
* <p>Return all the files that match pathPattern and are not checksum
* files. Results are sorted by their names.
*
* <p>
* A filename pattern is composed of <i>regular</i> characters and
* <i>special pattern matching</i> characters, which are:
*
* <dl>
* <dd>
* <dl>
* <p>
* <dt> <tt> ? </tt>
* <dd> Matches any single character.
*
* <p>
* <dt> <tt> * </tt>
* <dd> Matches zero or more characters.
*
* <p>
* <dt> <tt> [<i>abc</i>] </tt>
* <dd> Matches a single character from character set
* <tt>{<i>a,b,c</i>}</tt>.
*
* <p>
* <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
* <dd> Matches a single character from the character range
* <tt>{<i>a...b</i>}</tt>. Note: character <tt><i>a</i></tt> must be
* lexicographically less than or equal to character <tt><i>b</i></tt>.
*
* <p>
* <dt> <tt> [^<i>a</i>] </tt>
* <dd> Matches a single char that is not from character set or range
* <tt>{<i>a</i>}</tt>. Note that the <tt>^</tt> character must occur
* immediately to the right of the opening bracket.
*
* <p>
* <dt> <tt> \<i>c</i> </tt>
* <dd> Removes (escapes) any special meaning of character <i>c</i>.
*
* <p>
* <dt> <tt> {ab,cd} </tt>
* <dd> Matches a string from the string set <tt>{<i>ab, cd</i>} </tt>
*
* <p>
* <dt> <tt> {ab,c{de,fh}} </tt>
* <dd> Matches a string from string set <tt>{<i>ab, cde, cfh</i>}</tt>
*
* </dl>
* </dd>
* </dl>
*
* @param pathPattern a regular expression specifying a path pattern
*
* @return an array of paths that match the path pattern
*
* @throws AccessControlException If access is denied
* @throws UnsupportedFileSystemException If file system for
* <code>pathPattern</code> is not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public FileStatus[] globStatus(Path pathPattern)
throws AccessControlException, UnsupportedFileSystemException,
IOException {
return new Globber(FileContext.this, pathPattern, DEFAULT_FILTER).glob();
}
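// A few glob examples using the pattern language documented above; all
// paths are hypothetical:
//
//   fc.util().globStatus(new Path("/logs/2024-??-*"));    // ? and *
//   fc.util().globStatus(new Path("/data/part-[0-9]*"));  // character range
//   fc.util().globStatus(new Path("/in/{csv,json}/*"));   // string set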
/**
* Return an array of FileStatus objects whose path names match pathPattern
* and are accepted by the user-supplied path filter. Results are sorted by
* their path names.
* Return null if pathPattern has no glob and the path does not exist.
* Return an empty array if pathPattern has a glob and no path matches it.
*
* @param pathPattern regular expression specifying the path pattern
* @param filter user-supplied path filter
*
* @return an array of FileStatus objects
*
* @throws AccessControlException If access is denied
* @throws UnsupportedFileSystemException If file system for
* <code>pathPattern</code> is not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public FileStatus[] globStatus(final Path pathPattern,
final PathFilter filter) throws AccessControlException,
UnsupportedFileSystemException, IOException {
return new Globber(FileContext.this, pathPattern, filter).glob();
}
/**
* Copy file from src to dest. See
* {@link #copy(Path, Path, boolean, boolean)}
*/
public boolean copy(final Path src, final Path dst)
throws AccessControlException, FileAlreadyExistsException,
FileNotFoundException, ParentNotDirectoryException,
UnsupportedFileSystemException, IOException {
return copy(src, dst, false, false);
}
/**
* Copy from src to dst, optionally deleting src and overwriting dst.
* @param src the source path
* @param dst the destination path
* @param deleteSource delete src if true
* @param overwrite overwrite dst if true; throw IOException if dst exists
* and overwrite is false.
*
* @return true if copy is successful
*
* @throws AccessControlException If access is denied
* @throws FileAlreadyExistsException If <code>dst</code> already exists
* @throws FileNotFoundException If <code>src</code> does not exist
* @throws ParentNotDirectoryException If parent of <code>dst</code> is not
* a directory
* @throws UnsupportedFileSystemException If file system for
* <code>src</code> or <code>dst</code> is not supported
* @throws IOException If an I/O error occurred
*
* Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*
* RuntimeExceptions:
* @throws InvalidPathException If path <code>dst</code> is invalid
*/
public boolean copy(final Path src, final Path dst, boolean deleteSource,
boolean overwrite) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnsupportedFileSystemException,
IOException {
src.checkNotSchemeWithRelative();
dst.checkNotSchemeWithRelative();
Path qSrc = makeQualified(src);
Path qDst = makeQualified(dst);
checkDest(qSrc.getName(), qDst, overwrite);
FileStatus fs = FileContext.this.getFileStatus(qSrc);
if (fs.isDirectory()) {
checkDependencies(qSrc, qDst);
mkdir(qDst, FsPermission.getDirDefault(), true);
FileStatus[] contents = listStatus(qSrc);
for (FileStatus content : contents) {
copy(makeQualified(content.getPath()), makeQualified(new Path(qDst,
content.getPath().getName())), deleteSource, overwrite);
}
} else {
InputStream in = null;
OutputStream out = null;
try {
in = open(qSrc);
EnumSet<CreateFlag> createFlag = overwrite ? EnumSet.of(
CreateFlag.CREATE, CreateFlag.OVERWRITE) :
EnumSet.of(CreateFlag.CREATE);
out = create(qDst, createFlag);
IOUtils.copyBytes(in, out, conf, true);
} finally {
IOUtils.closeStream(out);
IOUtils.closeStream(in);
}
}
if (deleteSource) {
return delete(qSrc, true);
} else {
return true;
}
}
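// Sketch of the copy variants above with hypothetical paths. The second call
// overwrites dst if present and removes src afterwards (a "move by copy"):
//
//   fc.util().copy(new Path("/a/in.txt"), new Path("/b/in.txt"));
//   fc.util().copy(new Path("/a/in.txt"), new Path("/b/in.txt"),
//       true /* deleteSource */, true /* overwrite */);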
}
/**
* Check if copying srcName to dst would overwrite an existing
* file or directory.
* @param srcName File or directory to be copied.
* @param dst Destination to copy srcName to.
* @param overwrite Whether it's ok to overwrite an existing file.
* @throws AccessControlException If access is denied.
* @throws IOException If dst is an existing directory, or dst is an
* existing file and the overwrite option is not passed.
*/
private void checkDest(String srcName, Path dst, boolean overwrite)
throws AccessControlException, IOException {
try {
FileStatus dstFs = getFileStatus(dst);
if (dstFs.isDirectory()) {
if (null == srcName) {
throw new IOException("Target " + dst + " is a directory");
}
// Recurse to check if dst/srcName exists.
checkDest(null, new Path(dst, srcName), overwrite);
} else if (!overwrite) {
throw new IOException("Target " + new Path(dst, srcName)
+ " already exists");
}
} catch (FileNotFoundException e) {
// dst does not exist - OK to copy.
}
}
//
// If the destination is a subdirectory of the source, then
// generate exception
//
private static void checkDependencies(Path qualSrc, Path qualDst)
throws IOException {
if (isSameFS(qualSrc, qualDst)) {
String srcq = qualSrc.toString() + Path.SEPARATOR;
String dstq = qualDst.toString() + Path.SEPARATOR;
if (dstq.startsWith(srcq)) {
if (srcq.length() == dstq.length()) {
throw new IOException("Cannot copy " + qualSrc + " to itself.");
} else {
throw new IOException("Cannot copy " + qualSrc +
" to its subdirectory " + qualDst);
}
}
}
}
/**
* Are qualPath1 and qualPath2 of the same file system?
* @param qualPath1 - fully qualified path
* @param qualPath2 - fully qualified path
* @return true if both paths are on the same file system
*/
private static boolean isSameFS(Path qualPath1, Path qualPath2) {
URI srcUri = qualPath1.toUri();
URI dstUri = qualPath2.toUri();
// Two paths are on the same file system iff their schemes match and their
// authorities do not conflict: if both are present, they must be equal.
return (srcUri.getScheme().equals(dstUri.getScheme()) &&
    !(srcUri.getAuthority() != null && dstUri.getAuthority() != null &&
        !srcUri.getAuthority().equals(dstUri.getAuthority())));
}
/**
* Deletes all the paths in deleteOnExit on JVM shutdown.
*/
static class FileContextFinalizer implements Runnable {
@Override
public synchronized void run() {
processDeleteOnExit();
}
}
/**
* Resolves all symbolic links in the specified path.
* Returns the new path object.
*/
protected Path resolve(final Path f) throws FileNotFoundException,
UnresolvedLinkException, AccessControlException, IOException {
return new FSLinkResolver<Path>() {
@Override
public Path next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.resolvePath(p);
}
}.resolve(this, f);
}
/**
* Resolves all symbolic links in the specified path leading up
* to, but not including the final path component.
* @param f path to resolve
* @return the new path object.
*/
protected Path resolveIntermediate(final Path f) throws IOException {
return new FSLinkResolver<FileStatus>() {
@Override
public FileStatus next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
return fs.getFileLinkStatus(p);
}
}.resolve(this, f).getPath();
}
/**
* Returns the list of AbstractFileSystems accessed in the path. The list may
* contain more than one AbstractFileSystem object if the path contains symlinks.
*
* @param f
* Path which needs to be resolved
* @return List of AbstractFileSystems accessed in the path
* @throws IOException
*/
Set<AbstractFileSystem> resolveAbstractFileSystems(final Path f)
throws IOException {
final Path absF = fixRelativePart(f);
final HashSet<AbstractFileSystem> result
= new HashSet<AbstractFileSystem>();
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException, UnresolvedLinkException {
result.add(fs);
fs.getFileStatus(p);
return null;
}
}.resolve(this, absF);
return result;
}
/**
* Get the statistics for a particular file system
*
* @param uri
* the uri for which to look up statistics. Only the scheme and authority
* parts of the uri are used as the key to store and look up.
* @return a statistics object
*/
public static Statistics getStatistics(URI uri) {
return AbstractFileSystem.getStatistics(uri);
}
/**
* Clears all the statistics stored in AbstractFileSystem, for all the file
* systems.
*/
public static void clearStatistics() {
AbstractFileSystem.clearStatistics();
}
/**
* Prints the statistics to standard output. Each file system is identified
* by its scheme and authority.
*/
public static void printStatistics() {
AbstractFileSystem.printStatistics();
}
/**
* @return Map of uri and statistics for each filesystem instantiated. The uri
* consists of scheme and authority for the filesystem.
*/
public static Map<URI, Statistics> getAllStatistics() {
return AbstractFileSystem.getAllStatistics();
}
/**
* Get delegation tokens for the file systems accessed for a given
* path.
* @param p Path for which delegation tokens are requested.
* @param renewer the account name that is allowed to renew the token.
* @return List of delegation tokens.
* @throws IOException
*/
@InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
public List<Token<?>> getDelegationTokens(
Path p, String renewer) throws IOException {
Set<AbstractFileSystem> afsSet = resolveAbstractFileSystems(p);
List<Token<?>> tokenList =
new ArrayList<Token<?>>();
for (AbstractFileSystem afs : afsSet) {
List<Token<?>> afsTokens = afs.getDelegationTokens(renewer);
tokenList.addAll(afsTokens);
}
return tokenList;
}
/**
* Modifies ACL entries of files and directories. This method can add new ACL
* entries or modify the permissions on existing ACL entries. All existing
* ACL entries that are not specified in this call are retained without
* changes. (Modifications are merged into the current ACL.)
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications
* @throws IOException if an ACL could not be modified
*/
public void modifyAclEntries(final Path path, final List<AclEntry> aclSpec)
throws IOException {
Path absF = fixRelativePart(path);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException {
fs.modifyAclEntries(p, aclSpec);
return null;
}
}.resolve(this, absF);
}
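// Sketch of building one ACL modification with the standard
// org.apache.hadoop.fs.permission builder API; the user name and path are
// hypothetical:
//
//   AclEntry entry = new AclEntry.Builder()
//       .setScope(AclEntryScope.ACCESS)
//       .setType(AclEntryType.USER)
//       .setName("alice")
//       .setPermission(FsAction.READ_WRITE)
//       .build();
//   fc.modifyAclEntries(new Path("/data/shared"), Arrays.asList(entry));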
/**
* Removes ACL entries from files and directories. Other ACL entries are
* retained.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing entries to remove
* @throws IOException if an ACL could not be modified
*/
public void removeAclEntries(final Path path, final List<AclEntry> aclSpec)
throws IOException {
Path absF = fixRelativePart(path);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException {
fs.removeAclEntries(p, aclSpec);
return null;
}
}.resolve(this, absF);
}
/**
* Removes all default ACL entries from files and directories.
*
* @param path Path to modify
* @throws IOException if an ACL could not be modified
*/
public void removeDefaultAcl(Path path)
throws IOException {
Path absF = fixRelativePart(path);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException {
fs.removeDefaultAcl(p);
return null;
}
}.resolve(this, absF);
}
/**
* Removes all but the base ACL entries of files and directories. The entries
* for user, group, and others are retained for compatibility with permission
* bits.
*
* @param path Path to modify
* @throws IOException if an ACL could not be removed
*/
public void removeAcl(Path path) throws IOException {
Path absF = fixRelativePart(path);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException {
fs.removeAcl(p);
return null;
}
}.resolve(this, absF);
}
/**
* Fully replaces ACL of files and directories, discarding all existing
* entries.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications, must include entries
* for user, group, and others for compatibility with permission bits.
* @throws IOException if an ACL could not be modified
*/
public void setAcl(Path path, final List<AclEntry> aclSpec)
throws IOException {
Path absF = fixRelativePart(path);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException {
fs.setAcl(p, aclSpec);
return null;
}
}.resolve(this, absF);
}
/**
* Gets the ACLs of files and directories.
*
* @param path Path to get
* @return the AclStatus describing the ACLs of the file or directory
* @throws IOException if an ACL could not be read
*/
public AclStatus getAclStatus(Path path) throws IOException {
Path absF = fixRelativePart(path);
return new FSLinkResolver<AclStatus>() {
@Override
public AclStatus next(final AbstractFileSystem fs, final Path p)
throws IOException {
return fs.getAclStatus(p);
}
}.resolve(this, absF);
}
/**
* Set an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
* @param name xattr name.
* @param value xattr value.
* @throws IOException
*/
public void setXAttr(Path path, String name, byte[] value)
throws IOException {
setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
}
/**
* Set an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
* @param name xattr name.
* @param value xattr value.
* @param flag xattr set flag
* @throws IOException
*/
public void setXAttr(Path path, final String name, final byte[] value,
final EnumSet<XAttrSetFlag> flag) throws IOException {
final Path absF = fixRelativePart(path);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException {
fs.setXAttr(p, name, value, flag);
return null;
}
}.resolve(this, absF);
}
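// Sketch: create an xattr that must not already exist, then unconditionally
// replace its value; the attribute name and values are hypothetical:
//
//   fc.setXAttr(p, "user.origin", "etl-7".getBytes(StandardCharsets.UTF_8),
//       EnumSet.of(XAttrSetFlag.CREATE));
//   fc.setXAttr(p, "user.origin", "etl-8".getBytes(StandardCharsets.UTF_8),
//       EnumSet.of(XAttrSetFlag.REPLACE));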
/**
* Get an xattr for a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attribute
* @param name xattr name.
* @return byte[] xattr value.
* @throws IOException
*/
public byte[] getXAttr(Path path, final String name) throws IOException {
final Path absF = fixRelativePart(path);
return new FSLinkResolver<byte[]>() {
@Override
public byte[] next(final AbstractFileSystem fs, final Path p)
throws IOException {
return fs.getXAttr(p, name);
}
}.resolve(this, absF);
}
/**
* Get all of the xattrs for a file or directory.
* Only those xattrs for which the logged-in user has permissions to view
* are returned.
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return Map<String, byte[]> describing the XAttrs of the file or directory
* @throws IOException
*/
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
final Path absF = fixRelativePart(path);
return new FSLinkResolver<Map<String, byte[]>>() {
@Override
public Map<String, byte[]> next(final AbstractFileSystem fs, final Path p)
throws IOException {
return fs.getXAttrs(p);
}
}.resolve(this, absF);
}
/**
* Get all of the xattrs for a file or directory.
* Only those xattrs for which the logged-in user has permissions to view
* are returned.
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @param names XAttr names.
* @return Map<String, byte[]> describing the XAttrs of the file or directory
* @throws IOException
*/
public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
throws IOException {
final Path absF = fixRelativePart(path);
return new FSLinkResolver<Map<String, byte[]>>() {
@Override
public Map<String, byte[]> next(final AbstractFileSystem fs, final Path p)
throws IOException {
return fs.getXAttrs(p, names);
}
}.resolve(this, absF);
}
/**
* Remove an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to remove extended attribute
* @param name xattr name
* @throws IOException
*/
public void removeXAttr(Path path, final String name) throws IOException {
final Path absF = fixRelativePart(path);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException {
fs.removeXAttr(p, name);
return null;
}
}.resolve(this, absF);
}
/**
* Get all of the xattr names for a file or directory.
* Only those xattr names which the logged-in user has permissions to view
* are returned.
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return List<String> of the XAttr names of the file or directory
* @throws IOException
*/
public List<String> listXAttrs(Path path) throws IOException {
final Path absF = fixRelativePart(path);
return new FSLinkResolver<List<String>>() {
@Override
public List<String> next(final AbstractFileSystem fs, final Path p)
throws IOException {
return fs.listXAttrs(p);
}
}.resolve(this, absF);
}
/**
* Create a snapshot with a default name.
*
* @param path The directory where snapshots will be taken.
* @return the snapshot path.
*
* @throws IOException If an I/O error occurred
*
* <p>Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public final Path createSnapshot(Path path) throws IOException {
return createSnapshot(path, null);
}
/**
* Create a snapshot.
*
* @param path The directory where snapshots will be taken.
* @param snapshotName The name of the snapshot
* @return the snapshot path.
*
* @throws IOException If an I/O error occurred
*
* <p>Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public Path createSnapshot(final Path path, final String snapshotName)
throws IOException {
final Path absF = fixRelativePart(path);
return new FSLinkResolver<Path>() {
@Override
public Path next(final AbstractFileSystem fs, final Path p)
throws IOException {
return fs.createSnapshot(p, snapshotName);
}
}.resolve(this, absF);
}
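// Sketch of a snapshot round trip, assuming the directory has been made
// snapshottable by an administrator; path and snapshot name are hypothetical:
//
//   Path snap = fc.createSnapshot(new Path("/data/warehouse"), "before-load");
//   // ... run the load, then keep or discard the snapshot:
//   fc.deleteSnapshot(new Path("/data/warehouse"), "before-load");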
/**
* Rename a snapshot.
*
* @param path The directory path where the snapshot was taken
* @param snapshotOldName Old name of the snapshot
* @param snapshotNewName New name of the snapshot
*
* @throws IOException If an I/O error occurred
*
* <p>Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public void renameSnapshot(final Path path, final String snapshotOldName,
final String snapshotNewName) throws IOException {
final Path absF = fixRelativePart(path);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException {
fs.renameSnapshot(p, snapshotOldName, snapshotNewName);
return null;
}
}.resolve(this, absF);
}
/**
* Delete a snapshot of a directory.
*
* @param path The directory that the to-be-deleted snapshot belongs to
* @param snapshotName The name of the snapshot
*
* @throws IOException If an I/O error occurred
*
* <p>Exceptions applicable to file systems accessed over RPC:
* @throws RpcClientException If an exception occurred in the RPC client
* @throws RpcServerException If an exception occurred in the RPC server
* @throws UnexpectedServerException If server implementation throws
* undeclared exception to RPC server
*/
public void deleteSnapshot(final Path path, final String snapshotName)
throws IOException {
final Path absF = fixRelativePart(path);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException {
fs.deleteSnapshot(p, snapshotName);
return null;
}
}.resolve(this, absF);
}
/**
* Set the storage policy for a given file or directory.
*
* @param path file or directory path.
* @param policyName the name of the target storage policy. The list
* of supported Storage policies can be retrieved
* via {@link #getAllStoragePolicies}.
*/
public void setStoragePolicy(final Path path, final String policyName)
throws IOException {
final Path absF = fixRelativePart(path);
new FSLinkResolver<Void>() {
@Override
public Void next(final AbstractFileSystem fs, final Path p)
throws IOException {
fs.setStoragePolicy(p, policyName); // operate on the resolved path p
return null;
}
}.resolve(this, absF);
}
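// Sketch: pin a directory to one of the policies reported by
// getAllStoragePolicies(); "HOT" is only an example name and the available
// policies depend on the underlying file system:
//
//   fc.setStoragePolicy(new Path("/data/recent"), "HOT");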
/**
* Query the effective storage policy for the given file or directory.
*
* @param path file or directory path.
* @return the storage policy for the given file or directory.
* @throws IOException
*/
public BlockStoragePolicySpi getStoragePolicy(Path path) throws IOException {
final Path absF = fixRelativePart(path);
return new FSLinkResolver<BlockStoragePolicySpi>() {
@Override
public BlockStoragePolicySpi next(final AbstractFileSystem fs,
final Path p)
throws IOException {
return fs.getStoragePolicy(p);
}
}.resolve(this, absF);
}
/**
* Retrieve all the storage policies supported by this file system.
*
* @return all storage policies supported by this filesystem.
* @throws IOException
*/
public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
throws IOException {
return defaultFS.getAllStoragePolicies();
}
}
| 106,259 | 37.85192 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
|
package org.apache.hadoop.fs;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
/**
* A <code>FilterFs</code> contains some other file system, which it uses as its
* basic file system, possibly transforming the data along the way or providing
* additional functionality. The class <code>FilterFs</code> itself simply
* overrides all methods of <code>AbstractFileSystem</code> with versions that
* pass all requests to the contained file system. Subclasses of
* <code>FilterFs</code> may further override some of these methods and may also
* provide additional methods and fields.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */
public abstract class FilterFs extends AbstractFileSystem {
private final AbstractFileSystem myFs;
protected AbstractFileSystem getMyFs() {
return myFs;
}
protected FilterFs(AbstractFileSystem fs) throws URISyntaxException {
super(fs.getUri(), fs.getUri().getScheme(),
fs.getUri().getAuthority() != null, fs.getUriDefaultPort());
myFs = fs;
}
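// A minimal subclass sketch of the decorator pattern described above:
// override one call, delegate everything else. The class name is
// hypothetical.
//
//   class LoggingFs extends FilterFs {
//     LoggingFs(AbstractFileSystem fs) throws URISyntaxException {
//       super(fs);
//     }
//     @Override
//     public FileStatus getFileStatus(Path f)
//         throws IOException, UnresolvedLinkException {
//       System.err.println("getFileStatus: " + f); // added behavior
//       return super.getFileStatus(f);             // delegate to wrapped fs
//     }
//   }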
@Override
public Statistics getStatistics() {
return myFs.getStatistics();
}
@Override
public Path makeQualified(Path path) {
return myFs.makeQualified(path);
}
@Override
public Path getInitialWorkingDirectory() {
return myFs.getInitialWorkingDirectory();
}
@Override
public Path getHomeDirectory() {
return myFs.getHomeDirectory();
}
@Override
public FSDataOutputStream createInternal(Path f,
EnumSet<CreateFlag> flag, FsPermission absolutePermission, int bufferSize,
short replication, long blockSize, Progressable progress,
ChecksumOpt checksumOpt, boolean createParent)
throws IOException, UnresolvedLinkException {
checkPath(f);
return myFs.createInternal(f, flag, absolutePermission, bufferSize,
replication, blockSize, progress, checksumOpt, createParent);
}
@Override
public boolean delete(Path f, boolean recursive)
throws IOException, UnresolvedLinkException {
checkPath(f);
return myFs.delete(f, recursive);
}
@Override
public BlockLocation[] getFileBlockLocations(Path f, long start, long len)
throws IOException, UnresolvedLinkException {
checkPath(f);
return myFs.getFileBlockLocations(f, start, len);
}
@Override
public FileChecksum getFileChecksum(Path f)
throws IOException, UnresolvedLinkException {
checkPath(f);
return myFs.getFileChecksum(f);
}
@Override
public FileStatus getFileStatus(Path f)
throws IOException, UnresolvedLinkException {
checkPath(f);
return myFs.getFileStatus(f);
}
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
checkPath(path);
myFs.access(path, mode);
}
@Override
public FileStatus getFileLinkStatus(final Path f)
throws IOException, UnresolvedLinkException {
checkPath(f);
return myFs.getFileLinkStatus(f);
}
@Override
public FsStatus getFsStatus(final Path f) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
return myFs.getFsStatus(f);
}
@Override
public FsStatus getFsStatus() throws IOException {
return myFs.getFsStatus();
}
@Override
public FsServerDefaults getServerDefaults() throws IOException {
return myFs.getServerDefaults();
}
@Override
public Path resolvePath(final Path p) throws FileNotFoundException,
UnresolvedLinkException, AccessControlException, IOException {
return myFs.resolvePath(p);
}
@Override
public int getUriDefaultPort() {
return myFs.getUriDefaultPort();
}
@Override
public URI getUri() {
return myFs.getUri();
}
@Override
public void checkPath(Path path) {
myFs.checkPath(path);
}
@Override
public String getUriPath(final Path p) {
return myFs.getUriPath(p);
}
@Override
public FileStatus[] listStatus(Path f)
throws IOException, UnresolvedLinkException {
checkPath(f);
return myFs.listStatus(f);
}
@Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
checkPath(f);
return myFs.listLocatedStatus(f);
}
@Override
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
throws IOException {
return myFs.listCorruptFileBlocks(path);
}
@Override
public void mkdir(Path dir, FsPermission permission, boolean createParent)
throws IOException, UnresolvedLinkException {
checkPath(dir);
myFs.mkdir(dir, permission, createParent);
}
@Override
public FSDataInputStream open(final Path f) throws AccessControlException,
FileNotFoundException, UnresolvedLinkException, IOException {
checkPath(f);
return myFs.open(f);
}
@Override
public FSDataInputStream open(Path f, int bufferSize)
throws IOException, UnresolvedLinkException {
checkPath(f);
return myFs.open(f, bufferSize);
}
@Override
public boolean truncate(Path f, long newLength)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
checkPath(f);
return myFs.truncate(f, newLength);
}
@Override
public void renameInternal(Path src, Path dst)
throws IOException, UnresolvedLinkException {
checkPath(src);
checkPath(dst);
myFs.rename(src, dst, Options.Rename.NONE);
}
@Override
public void renameInternal(final Path src, final Path dst,
boolean overwrite) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnresolvedLinkException, IOException {
myFs.renameInternal(src, dst, overwrite);
}
@Override
public void setOwner(Path f, String username, String groupname)
throws IOException, UnresolvedLinkException {
checkPath(f);
myFs.setOwner(f, username, groupname);
}
@Override
public void setPermission(Path f, FsPermission permission)
throws IOException, UnresolvedLinkException {
checkPath(f);
myFs.setPermission(f, permission);
}
@Override
public boolean setReplication(Path f, short replication)
throws IOException, UnresolvedLinkException {
checkPath(f);
return myFs.setReplication(f, replication);
}
@Override
public void setTimes(Path f, long mtime, long atime)
throws IOException, UnresolvedLinkException {
checkPath(f);
myFs.setTimes(f, mtime, atime);
}
@Override
public void setVerifyChecksum(boolean verifyChecksum)
throws IOException, UnresolvedLinkException {
myFs.setVerifyChecksum(verifyChecksum);
}
@Override
public boolean supportsSymlinks() {
return myFs.supportsSymlinks();
}
@Override
public void createSymlink(Path target, Path link, boolean createParent)
throws IOException, UnresolvedLinkException {
myFs.createSymlink(target, link, createParent);
}
@Override
public Path getLinkTarget(final Path f) throws IOException {
return myFs.getLinkTarget(f);
}
@Override // AbstractFileSystem
public String getCanonicalServiceName() {
return myFs.getCanonicalServiceName();
}
@Override // AbstractFileSystem
public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
return myFs.getDelegationTokens(renewer);
}
@Override
public boolean isValidName(String src) {
return myFs.isValidName(src);
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
myFs.modifyAclEntries(path, aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
myFs.removeAclEntries(path, aclSpec);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
myFs.removeDefaultAcl(path);
}
@Override
public void removeAcl(Path path) throws IOException {
myFs.removeAcl(path);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
myFs.setAcl(path, aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
return myFs.getAclStatus(path);
}
@Override
public void setXAttr(Path path, String name, byte[] value)
throws IOException {
myFs.setXAttr(path, name, value);
}
@Override
public void setXAttr(Path path, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException {
myFs.setXAttr(path, name, value, flag);
}
@Override
public byte[] getXAttr(Path path, String name) throws IOException {
return myFs.getXAttr(path, name);
}
@Override
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
return myFs.getXAttrs(path);
}
@Override
public Map<String, byte[]> getXAttrs(Path path, List<String> names)
throws IOException {
return myFs.getXAttrs(path, names);
}
@Override
public List<String> listXAttrs(Path path) throws IOException {
return myFs.listXAttrs(path);
}
@Override
public void removeXAttr(Path path, String name) throws IOException {
myFs.removeXAttr(path, name);
}
@Override
public Path createSnapshot(final Path path, final String snapshotName)
throws IOException {
return myFs.createSnapshot(path, snapshotName);
}
@Override
public void renameSnapshot(final Path path, final String snapshotOldName,
final String snapshotNewName) throws IOException {
myFs.renameSnapshot(path, snapshotOldName, snapshotNewName);
}
@Override
public void deleteSnapshot(final Path path, final String snapshotName)
throws IOException {
myFs.deleteSnapshot(path, snapshotName);
}
@Override
public void setStoragePolicy(Path path, String policyName)
throws IOException {
myFs.setStoragePolicy(path, policyName);
}
@Override
public BlockStoragePolicySpi getStoragePolicy(final Path src)
throws IOException {
return myFs.getStoragePolicy(src);
}
@Override
public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
throws IOException {
return myFs.getAllStoragePolicies();
}
}
| 11,904 | 27.345238 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Represents the network location of a block, information about the hosts
* that contain block replicas, and other block metadata (e.g. the file
* offset associated with the block, its length, whether it is corrupt, etc.).
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class BlockLocation {
private String[] hosts; // Datanode hostnames
private String[] cachedHosts; // Datanode hostnames with a cached replica
private String[] names; // Datanode IP:xferPort for accessing the block
private String[] topologyPaths; // Full path name in network topology
private long offset; // Offset of the block in the file
private long length;
private boolean corrupt;
private static final String[] EMPTY_STR_ARRAY = new String[0];
/**
* Default Constructor
*/
public BlockLocation() {
this(EMPTY_STR_ARRAY, EMPTY_STR_ARRAY, 0L, 0L);
}
/**
* Copy constructor
*/
public BlockLocation(BlockLocation that) {
this.hosts = that.hosts;
this.cachedHosts = that.cachedHosts;
this.names = that.names;
this.topologyPaths = that.topologyPaths;
this.offset = that.offset;
this.length = that.length;
this.corrupt = that.corrupt;
}
/**
* Constructor with host, name, offset and length
*/
public BlockLocation(String[] names, String[] hosts, long offset,
long length) {
this(names, hosts, offset, length, false);
}
/**
* Constructor with host, name, offset, length and corrupt flag
*/
public BlockLocation(String[] names, String[] hosts, long offset,
long length, boolean corrupt) {
this(names, hosts, null, offset, length, corrupt);
}
/**
* Constructor with host, name, network topology, offset and length
*/
public BlockLocation(String[] names, String[] hosts, String[] topologyPaths,
long offset, long length) {
this(names, hosts, topologyPaths, offset, length, false);
}
/**
* Constructor with host, name, network topology, offset, length
* and corrupt flag
*/
public BlockLocation(String[] names, String[] hosts, String[] topologyPaths,
long offset, long length, boolean corrupt) {
this(names, hosts, null, topologyPaths, offset, length, corrupt);
}
public BlockLocation(String[] names, String[] hosts, String[] cachedHosts,
String[] topologyPaths, long offset, long length, boolean corrupt) {
if (names == null) {
this.names = EMPTY_STR_ARRAY;
} else {
this.names = names;
}
if (hosts == null) {
this.hosts = EMPTY_STR_ARRAY;
} else {
this.hosts = hosts;
}
if (cachedHosts == null) {
this.cachedHosts = EMPTY_STR_ARRAY;
} else {
this.cachedHosts = cachedHosts;
}
if (topologyPaths == null) {
this.topologyPaths = EMPTY_STR_ARRAY;
} else {
this.topologyPaths = topologyPaths;
}
this.offset = offset;
this.length = length;
this.corrupt = corrupt;
}
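// Sketch: a location for the first 128 MB block of a file with a single
// replica; the host and name strings are hypothetical:
//
//   BlockLocation loc = new BlockLocation(
//       new String[] {"10.0.0.1:50010"}, // names (IP:xferPort)
//       new String[] {"datanode1"},      // hosts
//       0L,                              // offset within the file
//       128L * 1024 * 1024);             // length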
/**
* Get the list of hosts (hostname) hosting this block
*/
public String[] getHosts() throws IOException {
return hosts;
}
/**
* Get the list of hosts (hostname) hosting a cached replica of the block
*/
public String[] getCachedHosts() {
return cachedHosts;
}
/**
* Get the list of names (IP:xferPort) hosting this block
*/
public String[] getNames() throws IOException {
return names;
}
/**
* Get the list of network topology paths for each of the hosts.
* The last component of the path is the "name" (IP:xferPort).
*/
public String[] getTopologyPaths() throws IOException {
return topologyPaths;
}
/**
* Get the start offset of file associated with this block
*/
public long getOffset() {
return offset;
}
/**
* Get the length of the block
*/
public long getLength() {
return length;
}
/**
* Get the corrupt flag.
*/
public boolean isCorrupt() {
return corrupt;
}
/**
* Set the start offset of file associated with this block
*/
public void setOffset(long offset) {
this.offset = offset;
}
/**
* Set the length of block
*/
public void setLength(long length) {
this.length = length;
}
/**
* Set the corrupt flag.
*/
public void setCorrupt(boolean corrupt) {
this.corrupt = corrupt;
}
/**
* Set the hosts hosting this block
*/
public void setHosts(String[] hosts) throws IOException {
if (hosts == null) {
this.hosts = EMPTY_STR_ARRAY;
} else {
this.hosts = hosts;
}
}
/**
* Set the hosts hosting a cached replica of this block
*/
public void setCachedHosts(String[] cachedHosts) {
if (cachedHosts == null) {
this.cachedHosts = EMPTY_STR_ARRAY;
} else {
this.cachedHosts = cachedHosts;
}
}
/**
* Set the names (host:port) hosting this block
*/
public void setNames(String[] names) throws IOException {
if (names == null) {
this.names = EMPTY_STR_ARRAY;
} else {
this.names = names;
}
}
/**
* Set the network topology paths of the hosts
*/
public void setTopologyPaths(String[] topologyPaths) throws IOException {
if (topologyPaths == null) {
this.topologyPaths = EMPTY_STR_ARRAY;
} else {
this.topologyPaths = topologyPaths;
}
}
@Override
public String toString() {
StringBuilder result = new StringBuilder();
result.append(offset);
result.append(',');
result.append(length);
if (corrupt) {
result.append("(corrupt)");
}
for(String h: hosts) {
result.append(',');
result.append(h);
}
return result.toString();
}
}
| 6,693 | 25.458498 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.io.InputStream;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLConnection;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* Representation of a URL connection to open InputStreams.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
class FsUrlConnection extends URLConnection {
private Configuration conf;
private InputStream is;
FsUrlConnection(Configuration conf, URL url) {
super(url);
this.conf = conf;
}
@Override
public void connect() throws IOException {
try {
FileSystem fs = FileSystem.get(url.toURI(), conf);
is = fs.open(new Path(url.getPath()));
} catch (URISyntaxException e) {
throw new IOException(e.toString());
}
}
@Override
public InputStream getInputStream() throws IOException {
if (is == null) {
connect();
}
return is;
}
}
| 1,867 | 27.738462 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.ClosedChannelException;
import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
/****************************************************************
* Abstract Checksummed FileSystem.
* It provides a basic implementation of a Checksummed FileSystem,
* which creates a checksum file for each raw file.
* It generates & verifies checksums at the client side.
*
*****************************************************************/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class ChecksumFileSystem extends FilterFileSystem {
private static final byte[] CHECKSUM_VERSION = new byte[] {'c', 'r', 'c', 0};
private int bytesPerChecksum = 512;
private boolean verifyChecksum = true;
private boolean writeChecksum = true;
public static double getApproxChkSumLength(long size) {
return ChecksumFSOutputSummer.CHKSUM_AS_FRACTION * size;
}
public ChecksumFileSystem(FileSystem fs) {
super(fs);
}
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
if (conf != null) {
bytesPerChecksum = conf.getInt(LocalFileSystemConfigKeys.LOCAL_FS_BYTES_PER_CHECKSUM_KEY,
LocalFileSystemConfigKeys.LOCAL_FS_BYTES_PER_CHECKSUM_DEFAULT);
}
}
/**
* Set whether to verify checksum.
*/
@Override
public void setVerifyChecksum(boolean verifyChecksum) {
this.verifyChecksum = verifyChecksum;
}
@Override
public void setWriteChecksum(boolean writeChecksum) {
this.writeChecksum = writeChecksum;
}
/** get the raw file system */
@Override
public FileSystem getRawFileSystem() {
return fs;
}
/** Return the name of the checksum file associated with a file.*/
public Path getChecksumFile(Path file) {
return new Path(file.getParent(), "." + file.getName() + ".crc");
}
/** Return true iff file is a checksum file name.*/
public static boolean isChecksumFile(Path file) {
String name = file.getName();
return name.startsWith(".") && name.endsWith(".crc");
}
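// Naming example (illustrative): the checksum file for "/user/alice/data.txt"
// is "/user/alice/.data.txt.crc", which isChecksumFile() recognizes by its
// leading "." and trailing ".crc".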
/** Return the length of the checksum file given the size of the
* actual file.
**/
public long getChecksumFileLength(Path file, long fileSize) {
return getChecksumLength(fileSize, getBytesPerSum());
}
/** Return the bytes Per Checksum */
public int getBytesPerSum() {
return bytesPerChecksum;
}
private int getSumBufferSize(int bytesPerSum, int bufferSize) {
int defaultBufferSize = getConf().getInt(
LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_KEY,
LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_DEFAULT);
int proportionalBufferSize = bufferSize / bytesPerSum;
return Math.max(bytesPerSum,
Math.max(proportionalBufferSize, defaultBufferSize));
}
/*******************************************************
* For open()'s FSInputStream
* It verifies that data matches checksums.
*******************************************************/
private static class ChecksumFSInputChecker extends FSInputChecker {
private ChecksumFileSystem fs;
private FSDataInputStream datas;
private FSDataInputStream sums;
private static final int HEADER_LENGTH = 8;
private int bytesPerSum = 1;
public ChecksumFSInputChecker(ChecksumFileSystem fs, Path file)
throws IOException {
this(fs, file, fs.getConf().getInt(
LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_KEY,
LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_DEFAULT));
}
public ChecksumFSInputChecker(ChecksumFileSystem fs, Path file, int bufferSize)
throws IOException {
super( file, fs.getFileStatus(file).getReplication() );
this.datas = fs.getRawFileSystem().open(file, bufferSize);
this.fs = fs;
Path sumFile = fs.getChecksumFile(file);
try {
int sumBufferSize = fs.getSumBufferSize(fs.getBytesPerSum(), bufferSize);
sums = fs.getRawFileSystem().open(sumFile, sumBufferSize);
byte[] version = new byte[CHECKSUM_VERSION.length];
sums.readFully(version);
if (!Arrays.equals(version, CHECKSUM_VERSION))
throw new IOException("Not a checksum file: "+sumFile);
this.bytesPerSum = sums.readInt();
set(fs.verifyChecksum, DataChecksum.newCrc32(), bytesPerSum, 4);
} catch (FileNotFoundException e) { // quietly ignore
set(fs.verifyChecksum, null, 1, 0);
} catch (IOException e) { // loudly ignore
LOG.warn("Problem opening checksum file: "+ file +
". Ignoring exception: " , e);
set(fs.verifyChecksum, null, 1, 0);
}
}
private long getChecksumFilePos( long dataPos ) {
return HEADER_LENGTH + 4*(dataPos/bytesPerSum);
}
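// Worked example (illustrative): with bytesPerSum = 512, data position 1536
// falls in chunk 3, so its CRC lives at HEADER_LENGTH + 4*3 = 20 in the
// checksum file.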
@Override
protected long getChunkPosition( long dataPos ) {
return dataPos/bytesPerSum*bytesPerSum;
}
@Override
public int available() throws IOException {
return datas.available() + super.available();
}
@Override
public int read(long position, byte[] b, int off, int len)
throws IOException {
// parameter check
if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return 0;
}
if( position<0 ) {
throw new IllegalArgumentException(
"Parameter position can not to be negative");
}
ChecksumFSInputChecker checker = new ChecksumFSInputChecker(fs, file);
checker.seek(position);
int nread = checker.read(b, off, len);
checker.close();
return nread;
}
@Override
public void close() throws IOException {
datas.close();
if( sums != null ) {
sums.close();
}
set(fs.verifyChecksum, null, 1, 0);
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
long sumsPos = getChecksumFilePos(targetPos);
fs.reportChecksumFailure(file, datas, targetPos, sums, sumsPos);
boolean newDataSource = datas.seekToNewSource(targetPos);
return sums.seekToNewSource(sumsPos) || newDataSource;
}
@Override
protected int readChunk(long pos, byte[] buf, int offset, int len,
byte[] checksum) throws IOException {
boolean eof = false;
if (needChecksum()) {
assert checksum != null; // we have a checksum buffer
assert checksum.length % CHECKSUM_SIZE == 0; // it is sane length
assert len >= bytesPerSum; // we must read at least one chunk
final int checksumsToRead = Math.min(
len/bytesPerSum, // number of checksums based on len to read
checksum.length / CHECKSUM_SIZE); // size of checksum buffer
long checksumPos = getChecksumFilePos(pos);
if(checksumPos != sums.getPos()) {
sums.seek(checksumPos);
}
int sumLenRead = sums.read(checksum, 0, CHECKSUM_SIZE * checksumsToRead);
if (sumLenRead >= 0 && sumLenRead % CHECKSUM_SIZE != 0) {
throw new ChecksumException(
"Checksum file not a length multiple of checksum size " +
"in " + file + " at " + pos + " checksumpos: " + checksumPos +
" sumLenread: " + sumLenRead,
pos);
}
if (sumLenRead <= 0) { // we're at the end of the file
eof = true;
} else {
// Adjust amount of data to read based on how many checksum chunks we read
len = Math.min(len, bytesPerSum * (sumLenRead / CHECKSUM_SIZE));
}
}
if(pos != datas.getPos()) {
datas.seek(pos);
}
int nread = readFully(datas, buf, offset, len);
if (eof && nread > 0) {
throw new ChecksumException("Checksum error: "+file+" at "+pos, pos);
}
return nread;
}
}
private static class FSDataBoundedInputStream extends FSDataInputStream {
private FileSystem fs;
private Path file;
private long fileLen = -1L;
FSDataBoundedInputStream(FileSystem fs, Path file, InputStream in) {
super(in);
this.fs = fs;
this.file = file;
}
@Override
public boolean markSupported() {
return false;
}
/* Return the file length */
private long getFileLength() throws IOException {
if( fileLen==-1L ) {
fileLen = fs.getContentSummary(file).getLength();
}
return fileLen;
}
/**
* Skips over and discards <code>n</code> bytes of data from the
* input stream.
*
* The <code>skip</code> method may skip a smaller number of bytes
* if end of file is reached before <code>n</code> bytes have been skipped.
* The actual number of bytes skipped is returned. If <code>n</code> is
* negative, no bytes are skipped.
*
* @param n the number of bytes to be skipped.
* @return the actual number of bytes skipped.
* @exception IOException if an I/O error occurs.
* ChecksumException if the chunk to skip to is corrupted
*/
@Override
public synchronized long skip(long n) throws IOException {
long curPos = getPos();
long fileLength = getFileLength();
if( n+curPos > fileLength ) {
n = fileLength - curPos;
}
return super.skip(n);
}
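// Clamping example (illustrative): at position 90 in a 100-byte file,
// skip(50) reduces n to 10, so at most 10 bytes are skipped.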
/**
* Seek to the given position in the stream.
* The next read() will be from that position.
*
* <p>This method does not allow seeking past the end of the file.
* Attempting to do so produces an IOException.
*
* @param pos the position to seek to.
* @exception IOException if an I/O error occurs or seeks after EOF
* ChecksumException if the chunk to seek to is corrupted
*/
@Override
public synchronized void seek(long pos) throws IOException {
if (pos > getFileLength()) {
throw new EOFException("Cannot seek after EOF");
}
super.seek(pos);
}
}
/**
* Opens an FSDataInputStream at the indicated Path.
* @param f the file name to open
* @param bufferSize the size of the buffer to be used.
*/
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
FileSystem fs;
InputStream in;
if (verifyChecksum) {
fs = this;
in = new ChecksumFSInputChecker(this, f, bufferSize);
} else {
fs = getRawFileSystem();
in = fs.open(f, bufferSize);
}
return new FSDataBoundedInputStream(fs, f, in);
}
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
throw new IOException("Not supported");
}
@Override
public boolean truncate(Path f, long newLength) throws IOException {
throw new IOException("Not supported");
}
/**
* Calculates the length of the checksum file in bytes.
* @param size the length of the data file in bytes
* @param bytesPerSum the number of bytes in a checksum block
* @return the number of bytes in the checksum file
*/
public static long getChecksumLength(long size, int bytesPerSum) {
//the checksum length is 4 bytes per data chunk (size divided by
//bytesPerSum, rounded up) plus the header bytes written at the
//beginning of the checksum file.
return ((size + bytesPerSum - 1) / bytesPerSum) * 4 +
CHECKSUM_VERSION.length + 4;
}
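// Worked example (illustrative): a 2000-byte file with bytesPerSum = 512 has
// ceil(2000/512) = 4 chunks, so the checksum file holds 4*4 = 16 bytes of
// CRCs plus the 8-byte header (4 version bytes + a 4-byte int) = 24 bytes.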
/** This class provides an output stream for a checksummed file.
* It generates checksums for data. */
private static class ChecksumFSOutputSummer extends FSOutputSummer {
private FSDataOutputStream datas;
private FSDataOutputStream sums;
private static final float CHKSUM_AS_FRACTION = 0.01f;
private boolean isClosed = false;
public ChecksumFSOutputSummer(ChecksumFileSystem fs,
Path file,
boolean overwrite,
int bufferSize,
short replication,
long blockSize,
Progressable progress,
FsPermission permission)
throws IOException {
super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
fs.getBytesPerSum()));
int bytesPerSum = fs.getBytesPerSum();
this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
bufferSize, replication, blockSize,
progress);
int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
permission, true, sumBufferSize,
replication, blockSize, null);
sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
sums.writeInt(bytesPerSum);
}
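// Resulting checksum file layout (from the writes above): the 4 magic bytes
// 'c','r','c',0, a 4-byte int holding bytesPerSum, then one 4-byte CRC32 per
// data chunk appended by writeChunk().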
@Override
public void close() throws IOException {
try {
flushBuffer();
sums.close();
datas.close();
} finally {
isClosed = true;
}
}
@Override
protected void writeChunk(byte[] b, int offset, int len, byte[] checksum,
int ckoff, int cklen)
throws IOException {
datas.write(b, offset, len);
sums.write(checksum, ckoff, cklen);
}
@Override
protected void checkClosed() throws IOException {
if (isClosed) {
throw new ClosedChannelException();
}
}
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return create(f, permission, overwrite, true, bufferSize,
replication, blockSize, progress);
}
private FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, boolean createParent, int bufferSize,
short replication, long blockSize,
Progressable progress) throws IOException {
Path parent = f.getParent();
if (parent != null) {
if (!createParent && !exists(parent)) {
throw new FileNotFoundException("Parent directory doesn't exist: "
+ parent);
} else if (!mkdirs(parent)) {
throw new IOException("Mkdirs failed to create " + parent
+ " (exists=" + exists(parent) + ", cwd=" + getWorkingDirectory()
+ ")");
}
}
final FSDataOutputStream out;
if (writeChecksum) {
out = new FSDataOutputStream(
new ChecksumFSOutputSummer(this, f, overwrite, bufferSize, replication,
blockSize, progress, permission), null);
} else {
out = fs.create(f, permission, overwrite, bufferSize, replication,
blockSize, progress);
// remove the checksum file since we aren't writing one
Path checkFile = getChecksumFile(f);
if (fs.exists(checkFile)) {
fs.delete(checkFile, true);
}
}
return out;
}
@Override
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return create(f, permission, overwrite, false, bufferSize, replication,
blockSize, progress);
}
/**
* Set replication for an existing file.
* Implement the abstract <tt>setReplication</tt> of <tt>FileSystem</tt>
* @param src file name
* @param replication new replication
* @throws IOException
* @return true if successful;
* false if file does not exist or is a directory
*/
@Override
public boolean setReplication(Path src, short replication) throws IOException {
boolean value = fs.setReplication(src, replication);
if (!value)
return false;
Path checkFile = getChecksumFile(src);
if (exists(checkFile))
fs.setReplication(checkFile, replication);
return true;
}
/**
* Rename files/dirs
*/
@Override
public boolean rename(Path src, Path dst) throws IOException {
if (fs.isDirectory(src)) {
return fs.rename(src, dst);
} else {
if (fs.isDirectory(dst)) {
dst = new Path(dst, src.getName());
}
boolean value = fs.rename(src, dst);
if (!value)
return false;
Path srcCheckFile = getChecksumFile(src);
Path dstCheckFile = getChecksumFile(dst);
if (fs.exists(srcCheckFile)) { //try to rename checksum
value = fs.rename(srcCheckFile, dstCheckFile);
} else if (fs.exists(dstCheckFile)) {
// no src checksum, so remove dst checksum
value = fs.delete(dstCheckFile, true);
}
return value;
}
}
/**
* Implement the delete(Path, boolean) in checksum
* file system.
*/
@Override
public boolean delete(Path f, boolean recursive) throws IOException{
FileStatus fstatus = null;
try {
fstatus = fs.getFileStatus(f);
} catch(FileNotFoundException e) {
return false;
}
if (fstatus.isDirectory()) {
//this works since the crcs are in the same
//directories as the files, so we just delete
//everything in the underlying filesystem
return fs.delete(f, recursive);
} else {
Path checkFile = getChecksumFile(f);
if (fs.exists(checkFile)) {
fs.delete(checkFile, true);
}
return fs.delete(f, true);
}
}
final private static PathFilter DEFAULT_FILTER = new PathFilter() {
@Override
public boolean accept(Path file) {
return !isChecksumFile(file);
}
};
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param f
* given path
* @return the statuses of the files/directories in the given path
* @throws IOException
*/
@Override
public FileStatus[] listStatus(Path f) throws IOException {
return fs.listStatus(f, DEFAULT_FILTER);
}
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param f
* given path
* @return the statuses of the files/directories in the given path
* @throws IOException
*/
@Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
throws IOException {
return fs.listLocatedStatus(f, DEFAULT_FILTER);
}
@Override
public boolean mkdirs(Path f) throws IOException {
return fs.mkdirs(f);
}
@Override
public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
Configuration conf = getConf();
FileUtil.copy(getLocal(conf), src, this, dst, delSrc, conf);
}
/**
* The src file is under FS, and the dst is on the local disk.
* Copy it from FS control to the local dst name.
*/
@Override
public void copyToLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
Configuration conf = getConf();
FileUtil.copy(this, src, getLocal(conf), dst, delSrc, conf);
}
/**
* The src file is under FS, and the dst is on the local disk.
* Copy it from FS control to the local dst name.
* If src and dst are directories, the copyCrc parameter
* determines whether to copy CRC files.
*/
public void copyToLocalFile(Path src, Path dst, boolean copyCrc)
throws IOException {
if (!fs.isDirectory(src)) { // source is a file
fs.copyToLocalFile(src, dst);
FileSystem localFs = getLocal(getConf()).getRawFileSystem();
if (localFs.isDirectory(dst)) {
dst = new Path(dst, src.getName());
}
dst = getChecksumFile(dst);
if (localFs.exists(dst)) { //remove old local checksum file
localFs.delete(dst, true);
}
Path checksumFile = getChecksumFile(src);
if (copyCrc && fs.exists(checksumFile)) { //copy checksum file
fs.copyToLocalFile(checksumFile, dst);
}
} else {
FileStatus[] srcs = listStatus(src);
for (FileStatus srcFile : srcs) {
copyToLocalFile(srcFile.getPath(),
new Path(dst, srcFile.getPath().getName()), copyCrc);
}
}
}
@Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
return tmpLocalFile;
}
@Override
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
moveFromLocalFile(tmpLocalFile, fsOutputFile);
}
/**
* Report a checksum error to the file system.
* @param f the file name containing the error
* @param in the stream open on the file
* @param inPos the position of the beginning of the bad data in the file
* @param sums the stream open on the checksum file
* @param sumsPos the position of the beginning of the bad data in the checksum file
* @return whether retry is necessary
*/
public boolean reportChecksumFailure(Path f, FSDataInputStream in,
long inPos, FSDataInputStream sums, long sumsPos) {
return false;
}
}
| 22,425 | 32.372024 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathOperationException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
/** ENOTSUP */
public class PathOperationException extends PathExistsException {
static final long serialVersionUID = 0L;
/** @param path for the exception */
public PathOperationException(String path) {
super(path, "Operation not supported");
}
}
| 1,097 | 39.666667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathIOException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
/**
* Exceptions based on standard posix/linux style exceptions for path related
* errors. The message has the format "path: standard error string".
*
* This exception corresponds to Input/output error (EIO).
*/
public class PathIOException extends IOException {
static final long serialVersionUID = 0L;
private static final String EIO = "Input/output error";
// NOTE: this really should be a Path, but a Path is buggy and won't
// return the exact string used to construct the path, and it mangles
// uris with no authority
private String operation;
private String path;
private String targetPath;
/**
* Constructs a generic I/O error exception
* @param path for the exception
*/
public PathIOException(String path) {
this(path, EIO);
}
/**
* Appends the text of a Throwable to the default error message
* @param path for the exception
* @param cause a throwable to extract the error message
*/
public PathIOException(String path, Throwable cause) {
this(path, EIO, cause);
}
/**
* Avoid using this method. Use a subclass of PathIOException if
* possible.
* @param path for the exception
* @param error custom string to use as the error text
*/
public PathIOException(String path, String error) {
super(error);
this.path = path;
}
protected PathIOException(String path, String error, Throwable cause) {
super(error, cause);
this.path = path;
}
/** Format:
* cmd: {operation} `path' {to `target'}: error string
*/
@Override
public String getMessage() {
StringBuilder message = new StringBuilder();
if (operation != null) {
message.append(operation + " ");
}
message.append(formatPath(path));
if (targetPath != null) {
message.append(" to " + formatPath(targetPath));
}
message.append(": " + super.getMessage());
if (getCause() != null) {
message.append(": " + getCause().getMessage());
}
return message.toString();
}
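// Sample message (illustrative): with operation "rename", path "/a" and
// targetPath "/b", getMessage() yields:
//   rename `/a' to `/b': Input/output error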
/** @return Path that generated the exception */
public Path getPath() { return new Path(path); }
/** @return Path if the operation involved copying or moving, else null */
public Path getTargetPath() {
return (targetPath != null) ? new Path(targetPath) : null;
}
/**
* Optional operation that will preface the path
* @param operation a string
*/
public void setOperation(String operation) {
this.operation = operation;
}
/**
* Optional path if the exception involved two paths, ex. a copy operation
* @param targetPath the target of the operation
*/
public void setTargetPath(String targetPath) {
this.targetPath = targetPath;
}
private String formatPath(String path) {
return "`" + path + "'";
}
}
| 3,643 | 29.881356 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CanUnbuffer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* FSDataInputStreams implement this interface to indicate that they can clear
* their buffers on request.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface CanUnbuffer {
/**
* Reduce the buffering. This will also free sockets and file descriptors
* held by the stream, if possible.
*/
public void unbuffer();
}
| 1,317 | 34.621622 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrSetFlag.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Stable
public enum XAttrSetFlag {
/**
* Create a new xattr.
* If the xattr already exists, an exception will be thrown.
*/
CREATE((short) 0x01),
/**
* Replace an existing xattr.
* If the xattr does not exist, an exception will be thrown.
*/
REPLACE((short) 0x02);
private final short flag;
private XAttrSetFlag(short flag) {
this.flag = flag;
}
short getFlag() {
return flag;
}
public static void validate(String xAttrName, boolean xAttrExists,
EnumSet<XAttrSetFlag> flag) throws IOException {
if (flag == null || flag.isEmpty()) {
throw new HadoopIllegalArgumentException("A flag must be specified.");
}
if (xAttrExists) {
if (!flag.contains(REPLACE)) {
throw new IOException("XAttr: " + xAttrName +
" already exists. The REPLACE flag must be specified.");
}
} else {
if (!flag.contains(CREATE)) {
throw new IOException("XAttr: " + xAttrName +
" does not exist. The CREATE flag must be specified.");
}
}
}
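// Illustrative calls (assumption, not in the original source):
//   validate("user.owner", true, EnumSet.of(REPLACE));   // ok: xattr exists
//   validate("user.owner", false, EnumSet.of(REPLACE));  // throws IOException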
}
| 2,169 | 29.138889 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.DataOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** Utility that wraps a {@link OutputStream} in a {@link DataOutputStream}.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FSDataOutputStream extends DataOutputStream
implements Syncable, CanSetDropBehind {
private final OutputStream wrappedStream;
private static class PositionCache extends FilterOutputStream {
private FileSystem.Statistics statistics;
long position;
public PositionCache(OutputStream out,
FileSystem.Statistics stats,
long pos) throws IOException {
super(out);
statistics = stats;
position = pos;
}
public void write(int b) throws IOException {
out.write(b);
position++;
if (statistics != null) {
statistics.incrementBytesWritten(1);
}
}
public void write(byte b[], int off, int len) throws IOException {
out.write(b, off, len);
position += len; // update position
if (statistics != null) {
statistics.incrementBytesWritten(len);
}
}
public long getPos() throws IOException {
return position; // return cached position
}
public void close() throws IOException {
// ensure close works even if a null reference was passed in
if (out != null) {
out.close();
}
}
}
@Deprecated
public FSDataOutputStream(OutputStream out) throws IOException {
this(out, null);
}
public FSDataOutputStream(OutputStream out, FileSystem.Statistics stats)
throws IOException {
this(out, stats, 0);
}
public FSDataOutputStream(OutputStream out, FileSystem.Statistics stats,
long startPosition) throws IOException {
super(new PositionCache(out, stats, startPosition));
wrappedStream = out;
}
/**
* Get the current position in the output stream.
*
* @return the current position in the output stream
*/
public long getPos() throws IOException {
return ((PositionCache)out).getPos();
}
/**
* Close the underlying output stream.
*/
public void close() throws IOException {
out.close(); // This invokes PositionCache.close()
}
/**
* Get a reference to the wrapped output stream.
*
* @return the underlying output stream
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
public OutputStream getWrappedStream() {
return wrappedStream;
}
@Override // Syncable
@Deprecated
public void sync() throws IOException {
if (wrappedStream instanceof Syncable) {
((Syncable)wrappedStream).sync();
}
}
@Override // Syncable
public void hflush() throws IOException {
if (wrappedStream instanceof Syncable) {
((Syncable)wrappedStream).hflush();
} else {
wrappedStream.flush();
}
}
@Override // Syncable
public void hsync() throws IOException {
if (wrappedStream instanceof Syncable) {
((Syncable)wrappedStream).hsync();
} else {
wrappedStream.flush();
}
}
@Override
public void setDropBehind(Boolean dropBehind) throws IOException {
try {
((CanSetDropBehind)wrappedStream).setDropBehind(dropBehind);
} catch (ClassCastException e) {
throw new UnsupportedOperationException("the wrapped stream does " +
"not support setting the drop-behind caching setting.");
}
}
}
| 4,522 | 28.180645 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BatchedRemoteIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.util.List;
import java.util.NoSuchElementException;
/**
* A RemoteIterator that fetches elements in batches.
*/
public abstract class BatchedRemoteIterator<K, E> implements RemoteIterator<E> {
public interface BatchedEntries<E> {
public E get(int i);
public int size();
public boolean hasMore();
}
public static class BatchedListEntries<E> implements BatchedEntries<E> {
private final List<E> entries;
private final boolean hasMore;
public BatchedListEntries(List<E> entries, boolean hasMore) {
this.entries = entries;
this.hasMore = hasMore;
}
public E get(int i) {
return entries.get(i);
}
public int size() {
return entries.size();
}
public boolean hasMore() {
return hasMore;
}
}
private K prevKey;
private BatchedEntries<E> entries;
private int idx;
public BatchedRemoteIterator(K prevKey) {
this.prevKey = prevKey;
this.entries = null;
this.idx = -1;
}
/**
* Perform the actual remote request.
*
* @param prevKey The key to send.
* @return A list of replies.
*/
public abstract BatchedEntries<E> makeRequest(K prevKey) throws IOException;
private void makeRequest() throws IOException {
idx = 0;
entries = null;
entries = makeRequest(prevKey);
if (entries.size() == 0) {
entries = null;
}
}
private void makeRequestIfNeeded() throws IOException {
if (idx == -1) {
makeRequest();
} else if ((entries != null) && (idx >= entries.size())) {
if (!entries.hasMore()) {
// Last time, we got fewer entries than requested.
// So we should be at the end.
entries = null;
} else {
makeRequest();
}
}
}
@Override
public boolean hasNext() throws IOException {
makeRequestIfNeeded();
return (entries != null);
}
/**
* Return the next list key associated with an element.
*/
public abstract K elementToPrevKey(E element);
@Override
public E next() throws IOException {
makeRequestIfNeeded();
if (entries == null) {
throw new NoSuchElementException();
}
E entry = entries.get(idx++);
prevKey = elementToPrevKey(entry);
return entry;
}
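// Hypothetical subclass sketch (illustrative only; all names below are
// assumptions): an implementor supplies only the batched remote fetch and
// the key extraction, and this class drives the paging.
//
//   class EntryIterator extends BatchedRemoteIterator<Long, Entry> {
//     EntryIterator() { super(0L); }
//     public BatchedEntries<Entry> makeRequest(Long prevKey) throws IOException {
//       return rpc.listEntries(prevKey);  // one batched remote call
//     }
//     public Long elementToPrevKey(Entry e) { return e.getId(); }
//   }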
}
| 3,116 | 25.193277 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathExistsException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
/**
* Exception corresponding to File Exists - EEXISTS
*/
public class PathExistsException extends PathIOException {
static final long serialVersionUID = 0L;
/** @param path for the exception */
public PathExistsException(String path) {
super(path, "File exists");
}
protected PathExistsException(String path, String error) {
super(path, error);
}
}
| 1,212 | 35.757576 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.JceAesCtrCryptoCodec;
import org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec;
/**
* This class contains constants for configuration keys used
* in the common code.
*
* It includes all publicly documented configuration keys. In general
* this class should not be used directly (use CommonConfigurationKeys
* instead)
*
*/
@InterfaceAudience.Public
public class CommonConfigurationKeysPublic {
// The Keys
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IO_NATIVE_LIB_AVAILABLE_KEY =
"io.native.lib.available";
/** Default value for IO_NATIVE_LIB_AVAILABLE_KEY */
public static final boolean IO_NATIVE_LIB_AVAILABLE_DEFAULT = true;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY =
"net.topology.script.number.args";
/** Default value for NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY */
public static final int NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_DEFAULT = 100;
//FS keys
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String FS_DEFAULT_NAME_KEY = "fs.defaultFS";
/** Default value for FS_DEFAULT_NAME_KEY */
public static final String FS_DEFAULT_NAME_DEFAULT = "file:///";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String FS_DF_INTERVAL_KEY = "fs.df.interval";
/** Default value for FS_DF_INTERVAL_KEY */
public static final long FS_DF_INTERVAL_DEFAULT = 60000;
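// Typical lookup pattern (illustrative, not part of the original source):
//   long dfInterval = conf.getLong(FS_DF_INTERVAL_KEY, FS_DF_INTERVAL_DEFAULT);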
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String FS_DU_INTERVAL_KEY = "fs.du.interval";
/** Default value for FS_DU_INTERVAL_KEY */
public static final long FS_DU_INTERVAL_DEFAULT = 600000;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY =
"fs.client.resolve.remote.symlinks";
/** Default value for FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY */
public static final boolean FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_DEFAULT = true;
//Defaults are not specified for following keys
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY =
"net.topology.script.file.name";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY =
"net.topology.node.switch.mapping.impl";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String NET_TOPOLOGY_IMPL_KEY =
"net.topology.impl";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY =
"net.topology.table.file.name";
public static final String NET_DEPENDENCY_SCRIPT_FILE_NAME_KEY =
"net.topology.dependency.script.file.name";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String FS_TRASH_CHECKPOINT_INTERVAL_KEY =
"fs.trash.checkpoint.interval";
/** Default value for FS_TRASH_CHECKPOINT_INTERVAL_KEY */
public static final long FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT = 0;
// TBD: Code is still using hardcoded values (e.g. "fs.automatic.close")
// instead of constant (e.g. FS_AUTOMATIC_CLOSE_KEY)
//
/** Not used anywhere, looks like default value for FS_LOCAL_BLOCK_SIZE */
public static final long FS_LOCAL_BLOCK_SIZE_DEFAULT = 32*1024*1024;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String FS_AUTOMATIC_CLOSE_KEY = "fs.automatic.close";
/** Default value for FS_AUTOMATIC_CLOSE_KEY */
public static final boolean FS_AUTOMATIC_CLOSE_DEFAULT = true;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String FS_FILE_IMPL_KEY = "fs.file.impl";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String FS_FTP_HOST_KEY = "fs.ftp.host";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String FS_FTP_HOST_PORT_KEY = "fs.ftp.host.port";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String FS_TRASH_INTERVAL_KEY = "fs.trash.interval";
/** Default value for FS_TRASH_INTERVAL_KEY */
public static final long FS_TRASH_INTERVAL_DEFAULT = 0;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IO_MAPFILE_BLOOM_SIZE_KEY =
"io.mapfile.bloom.size";
/** Default value for IO_MAPFILE_BLOOM_SIZE_KEY */
public static final int IO_MAPFILE_BLOOM_SIZE_DEFAULT = 1024*1024;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IO_MAPFILE_BLOOM_ERROR_RATE_KEY =
"io.mapfile.bloom.error.rate" ;
/** Default value for IO_MAPFILE_BLOOM_ERROR_RATE_KEY */
public static final float IO_MAPFILE_BLOOM_ERROR_RATE_DEFAULT = 0.005f;
/** Codec class that implements Lzo compression algorithm */
public static final String IO_COMPRESSION_CODEC_LZO_CLASS_KEY =
"io.compression.codec.lzo.class";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IO_MAP_INDEX_INTERVAL_KEY =
"io.map.index.interval";
/** Default value for IO_MAP_INDEX_INTERVAL_KEY */
public static final int IO_MAP_INDEX_INTERVAL_DEFAULT = 128;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IO_MAP_INDEX_SKIP_KEY = "io.map.index.skip";
/** Default value for IO_MAP_INDEX_SKIP_KEY */
public static final int IO_MAP_INDEX_SKIP_DEFAULT = 0;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IO_SEQFILE_COMPRESS_BLOCKSIZE_KEY =
"io.seqfile.compress.blocksize";
/** Default value for IO_SEQFILE_COMPRESS_BLOCKSIZE_KEY */
public static final int IO_SEQFILE_COMPRESS_BLOCKSIZE_DEFAULT = 1000000;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IO_FILE_BUFFER_SIZE_KEY =
"io.file.buffer.size";
/** Default value for IO_FILE_BUFFER_SIZE_KEY */
public static final int IO_FILE_BUFFER_SIZE_DEFAULT = 4096;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IO_SKIP_CHECKSUM_ERRORS_KEY =
"io.skip.checksum.errors";
/** Default value for IO_SKIP_CHECKSUM_ERRORS_KEY */
public static final boolean IO_SKIP_CHECKSUM_ERRORS_DEFAULT = false;
/**
* @deprecated Moved to mapreduce, see mapreduce.task.io.sort.mb
* in mapred-default.xml
* See https://issues.apache.org/jira/browse/HADOOP-6801
*/
public static final String IO_SORT_MB_KEY = "io.sort.mb";
/** Default value for IO_SORT_MB_KEY */
public static final int IO_SORT_MB_DEFAULT = 100;
/**
* @deprecated Moved to mapreduce, see mapreduce.task.io.sort.factor
* in mapred-default.xml
* See https://issues.apache.org/jira/browse/HADOOP-6801
*/
public static final String IO_SORT_FACTOR_KEY = "io.sort.factor";
/** Default value for IO_SORT_FACTOR_KEY */
public static final int IO_SORT_FACTOR_DEFAULT = 100;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IO_SERIALIZATIONS_KEY = "io.serializations";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String TFILE_IO_CHUNK_SIZE_KEY = "tfile.io.chunk.size";
/** Default value for TFILE_IO_CHUNK_SIZE_KEY */
public static final int TFILE_IO_CHUNK_SIZE_DEFAULT = 1024*1024;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String TFILE_FS_INPUT_BUFFER_SIZE_KEY =
"tfile.fs.input.buffer.size";
/** Default value for TFILE_FS_INPUT_BUFFER_SIZE_KEY */
public static final int TFILE_FS_INPUT_BUFFER_SIZE_DEFAULT = 256*1024;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String TFILE_FS_OUTPUT_BUFFER_SIZE_KEY =
"tfile.fs.output.buffer.size";
/** Default value for TFILE_FS_OUTPUT_BUFFER_SIZE_KEY */
public static final int TFILE_FS_OUTPUT_BUFFER_SIZE_DEFAULT = 256*1024;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY =
"ipc.client.connection.maxidletime";
/** Default value for IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY */
public static final int IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT = 10000; // 10s
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_CLIENT_CONNECT_TIMEOUT_KEY =
"ipc.client.connect.timeout";
/** Default value for IPC_CLIENT_CONNECT_TIMEOUT_KEY */
public static final int IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT = 20000; // 20s
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_CLIENT_CONNECT_MAX_RETRIES_KEY =
"ipc.client.connect.max.retries";
/** Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_KEY */
public static final int IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT = 10;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY =
"ipc.client.connect.retry.interval";
/** Default value for IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY */
public static final int IPC_CLIENT_CONNECT_RETRY_INTERVAL_DEFAULT = 1000;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY =
"ipc.client.connect.max.retries.on.timeouts";
/** Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY */
public static final int IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 45;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_CLIENT_TCPNODELAY_KEY =
"ipc.client.tcpnodelay";
/** Default value for IPC_CLIENT_TCPNODELAY_KEY */
public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = true;
/** Enable low-latency connections from the client */
public static final String IPC_CLIENT_LOW_LATENCY = "ipc.client.low-latency";
/** Default value of IPC_CLIENT_LOW_LATENCY */
public static final boolean IPC_CLIENT_LOW_LATENCY_DEFAULT = false;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_SERVER_LISTEN_QUEUE_SIZE_KEY =
"ipc.server.listen.queue.size";
/** Default value for IPC_SERVER_LISTEN_QUEUE_SIZE_KEY */
public static final int IPC_SERVER_LISTEN_QUEUE_SIZE_DEFAULT = 128;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_CLIENT_KILL_MAX_KEY = "ipc.client.kill.max";
/** Default value for IPC_CLIENT_KILL_MAX_KEY */
public static final int IPC_CLIENT_KILL_MAX_DEFAULT = 10;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_CLIENT_IDLETHRESHOLD_KEY =
"ipc.client.idlethreshold";
/** Default value for IPC_CLIENT_IDLETHRESHOLD_KEY */
public static final int IPC_CLIENT_IDLETHRESHOLD_DEFAULT = 4000;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_SERVER_TCPNODELAY_KEY =
"ipc.server.tcpnodelay";
/** Default value for IPC_SERVER_TCPNODELAY_KEY */
public static final boolean IPC_SERVER_TCPNODELAY_DEFAULT = true;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_SERVER_MAX_CONNECTIONS_KEY =
"ipc.server.max.connections";
/** Default value for IPC_SERVER_MAX_CONNECTIONS_KEY */
public static final int IPC_SERVER_MAX_CONNECTIONS_DEFAULT = 0;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY =
"hadoop.rpc.socket.factory.class.default";
public static final String HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_DEFAULT =
"org.apache.hadoop.net.StandardSocketFactory";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SOCKS_SERVER_KEY = "hadoop.socks.server";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_UTIL_HASH_TYPE_KEY =
"hadoop.util.hash.type";
/** Default value for HADOOP_UTIL_HASH_TYPE_KEY */
public static final String HADOOP_UTIL_HASH_TYPE_DEFAULT = "murmur";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_GROUP_MAPPING =
"hadoop.security.group.mapping";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_GROUPS_CACHE_SECS =
"hadoop.security.groups.cache.secs";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final long HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT =
300;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS =
"hadoop.security.groups.negative-cache.secs";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final long HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT =
30;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS =
"hadoop.security.groups.cache.warn.after.ms";
public static final long HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT =
5000;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_AUTHENTICATION =
"hadoop.security.authentication";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_AUTHORIZATION =
"hadoop.security.authorization";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN =
"hadoop.security.instrumentation.requires.admin";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_SERVICE_USER_NAME_KEY =
"hadoop.security.service.user.name.key";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_AUTH_TO_LOCAL =
"hadoop.security.auth_to_local";
/** Only used by HttpServer. */
@Deprecated
public static final String HADOOP_SSL_ENABLED_KEY = "hadoop.ssl.enabled";
/** Only used by HttpServer. */
@Deprecated
public static final boolean HADOOP_SSL_ENABLED_DEFAULT = false;
// HTTP policies to be used in configuration
// Use HttpPolicy.name() instead
@Deprecated
public static final String HTTP_POLICY_HTTP_ONLY = "HTTP_ONLY";
@Deprecated
public static final String HTTP_POLICY_HTTPS_ONLY = "HTTPS_ONLY";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_RPC_PROTECTION =
"hadoop.rpc.protection";
/** Class to override Sasl Properties for a connection */
public static final String HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS =
"hadoop.security.saslproperties.resolver.class";
public static final String HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX =
"hadoop.security.crypto.codec.classes";
public static final String
HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY =
HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
+ CipherSuite.AES_CTR_NOPADDING.getConfigSuffix();
public static final String
HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_DEFAULT =
OpensslAesCtrCryptoCodec.class.getName() + "," +
JceAesCtrCryptoCodec.class.getName();
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY =
"hadoop.security.crypto.cipher.suite";
public static final String HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT =
"AES/CTR/NoPadding";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY =
"hadoop.security.crypto.jce.provider";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY =
"hadoop.security.crypto.buffer.size";
/** Default value for HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY */
public static final int HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT = 8192;
/** Class to override Impersonation provider */
public static final String HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS =
"hadoop.security.impersonation.provider.class";
// <!-- KMSClientProvider configurations -->
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String KMS_CLIENT_ENC_KEY_CACHE_SIZE =
"hadoop.security.kms.client.encrypted.key.cache.size";
/** Default value for KMS_CLIENT_ENC_KEY_CACHE_SIZE */
public static final int KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT = 500;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK =
"hadoop.security.kms.client.encrypted.key.cache.low-watermark";
/** Default value for KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK */
public static final float KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT =
0.3f;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS =
"hadoop.security.kms.client.encrypted.key.cache.num.refill.threads";
/** Default value for KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS */
public static final int KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT =
2;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS =
"hadoop.security.kms.client.encrypted.key.cache.expiry";
/** Default value for KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS (12 hrs) */
public static final int KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT = 43200000;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY =
"hadoop.security.java.secure.random.algorithm";
/** Default value for HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY */
public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT =
"SHA1PRNG";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY =
"hadoop.security.secure.random.impl";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY =
"hadoop.security.random.device.file.path";
public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT =
"/dev/urandom";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY =
"hadoop.shell.missing.defaultFs.warning";
public static final boolean HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_DEFAULT =
false;
}
| 21,078 | 53.4677 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.net.URI;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* FileSystem related constants.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface FsConstants {
// URI for local filesystem
public static final URI LOCAL_FS_URI = URI.create("file:///");
// URI scheme for FTP
public static final String FTP_SCHEME = "ftp";
// Maximum number of symlinks to recursively resolve in a path
static final int MAX_PATH_LINKS = 32;
/**
* ViewFs: viewFs file system (i.e., the mount file system on the client side)
*/
public static final URI VIEWFS_URI = URI.create("viewfs:///");
public static final String VIEWFS_SCHEME = "viewfs";
}
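// A minimal usage sketch, not part of the original file: obtaining the local
// file system from LOCAL_FS_URI and forming a viewfs path from VIEWFS_SCHEME.
// The mount-table authority "clusterX" below is illustrative only.
class FsConstantsExample {
  public static void main(String[] args) throws java.io.IOException {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    FileSystem local = FileSystem.get(FsConstants.LOCAL_FS_URI, conf);
    System.out.println(local.getUri());                    // file:///
    System.out.println(FsConstants.VIEWFS_SCHEME + "://clusterX/user");
  }
}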
| 1,591 | 33.608696 | 75 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Time;
/** Provides a <i>trash</i> feature. Files are moved to a user's trash
* directory, a subdirectory of their home directory named ".Trash". Files are
* initially moved to a <i>current</i> sub-directory of the trash directory.
* Within that sub-directory their original path is preserved. Periodically
* one may checkpoint the current trash and remove older checkpoints. (This
* design permits trash management without enumeration of the full trash
* content, without date support in the filesystem, and without clock
* synchronization.)
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class TrashPolicyDefault extends TrashPolicy {
private static final Log LOG =
LogFactory.getLog(TrashPolicyDefault.class);
private static final Path CURRENT = new Path("Current");
private static final Path TRASH = new Path(".Trash/");
private static final FsPermission PERMISSION =
new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE);
private static final DateFormat CHECKPOINT = new SimpleDateFormat("yyMMddHHmmss");
/** Format of checkpoint directories used prior to Hadoop 0.23. */
private static final DateFormat OLD_CHECKPOINT =
new SimpleDateFormat("yyMMddHHmm");
private static final int MSECS_PER_MINUTE = 60*1000;
private Path current;
private Path homesParent;
private long emptierInterval;
public TrashPolicyDefault() { }
private TrashPolicyDefault(FileSystem fs, Path home, Configuration conf)
throws IOException {
initialize(conf, fs, home);
}
@Override
public void initialize(Configuration conf, FileSystem fs, Path home) {
this.fs = fs;
this.trash = new Path(home, TRASH);
this.homesParent = home.getParent();
this.current = new Path(trash, CURRENT);
this.deletionInterval = (long)(conf.getFloat(
FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT)
* MSECS_PER_MINUTE);
this.emptierInterval = (long)(conf.getFloat(
FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
* MSECS_PER_MINUTE);
}
private Path makeTrashRelativePath(Path basePath, Path rmFilePath) {
return Path.mergePaths(basePath, rmFilePath);
}
@Override
public boolean isEnabled() {
return deletionInterval != 0;
}
@Override
public boolean moveToTrash(Path path) throws IOException {
if (!isEnabled())
return false;
if (!path.isAbsolute()) // make path absolute
path = new Path(fs.getWorkingDirectory(), path);
if (!fs.exists(path)) // check that path exists
throw new FileNotFoundException(path.toString());
String qpath = fs.makeQualified(path).toString();
if (qpath.startsWith(trash.toString())) {
return false; // already in trash
}
if (trash.getParent().toString().startsWith(qpath)) {
throw new IOException("Cannot move \"" + path +
"\" to the trash, as it contains the trash");
}
Path trashPath = makeTrashRelativePath(current, path);
Path baseTrashPath = makeTrashRelativePath(current, path.getParent());
IOException cause = null;
// try twice, in case a checkpoint occurs between the mkdirs() and rename()
for (int i = 0; i < 2; i++) {
try {
if (!fs.mkdirs(baseTrashPath, PERMISSION)) { // create current
LOG.warn("Can't create(mkdir) trash directory: " + baseTrashPath);
return false;
}
} catch (IOException e) {
LOG.warn("Can't create trash directory: " + baseTrashPath, e);
cause = e;
break;
}
try {
// if the target path in Trash already exists, then append the
// current time in milliseconds.
String orig = trashPath.toString();
while(fs.exists(trashPath)) {
trashPath = new Path(orig + Time.now());
}
if (fs.rename(path, trashPath)) // move to current trash
return true;
} catch (IOException e) {
cause = e;
}
}
throw (IOException)
new IOException("Failed to move to trash: "+path).initCause(cause);
}
@SuppressWarnings("deprecation")
@Override
public void createCheckpoint() throws IOException {
if (!fs.exists(current)) // no trash, no checkpoint
return;
Path checkpointBase;
synchronized (CHECKPOINT) {
checkpointBase = new Path(trash, CHECKPOINT.format(new Date()));
}
Path checkpoint = checkpointBase;
int attempt = 0;
while (true) {
try {
fs.rename(current, checkpoint, Rename.NONE);
break;
} catch (FileAlreadyExistsException e) {
if (++attempt > 1000) {
throw new IOException("Failed to checkpoint trash: "+checkpoint);
}
checkpoint = checkpointBase.suffix("-" + attempt);
}
}
LOG.info("Created trash checkpoint: "+checkpoint.toUri().getPath());
}
@Override
public void deleteCheckpoint() throws IOException {
FileStatus[] dirs = null;
try {
dirs = fs.listStatus(trash); // scan trash sub-directories
} catch (FileNotFoundException fnfe) {
return;
}
long now = Time.now();
for (int i = 0; i < dirs.length; i++) {
Path path = dirs[i].getPath();
String dir = path.toUri().getPath();
String name = path.getName();
if (name.equals(CURRENT.getName())) // skip current
continue;
long time;
try {
time = getTimeFromCheckpoint(name);
} catch (ParseException e) {
LOG.warn("Unexpected item in trash: "+dir+". Ignoring.");
continue;
}
if ((now - deletionInterval) > time) {
if (fs.delete(path, true)) {
LOG.info("Deleted trash checkpoint: "+dir);
} else {
LOG.warn("Couldn't delete checkpoint: "+dir+" Ignoring.");
}
}
}
}
@Override
public Path getCurrentTrashDir() {
return current;
}
@Override
public Runnable getEmptier() throws IOException {
return new Emptier(getConf(), emptierInterval);
}
private class Emptier implements Runnable {
private Configuration conf;
private long emptierInterval;
Emptier(Configuration conf, long emptierInterval) throws IOException {
this.conf = conf;
this.emptierInterval = emptierInterval;
if (emptierInterval > deletionInterval || emptierInterval == 0) {
LOG.info("The configured checkpoint interval is " +
(emptierInterval / MSECS_PER_MINUTE) + " minutes." +
" Using an interval of " +
(deletionInterval / MSECS_PER_MINUTE) +
" minutes that is used for deletion instead");
this.emptierInterval = deletionInterval;
}
LOG.info("Namenode trash configuration: Deletion interval = "
+ (deletionInterval / MSECS_PER_MINUTE)
+ " minutes, Emptier interval = "
+ (emptierInterval / MSECS_PER_MINUTE) + " minutes.");
}
@Override
public void run() {
if (emptierInterval == 0)
return; // trash disabled
long now = Time.now();
long end;
while (true) {
end = ceiling(now, emptierInterval);
try { // sleep for interval
Thread.sleep(end - now);
} catch (InterruptedException e) {
break; // exit on interrupt
}
try {
now = Time.now();
if (now >= end) {
FileStatus[] homes = null;
try {
homes = fs.listStatus(homesParent); // list all home dirs
} catch (IOException e) {
LOG.warn("Trash can't list homes: "+e+" Sleeping.");
continue;
}
for (FileStatus home : homes) { // dump each trash
if (!home.isDirectory())
continue;
try {
TrashPolicyDefault trash = new TrashPolicyDefault(
fs, home.getPath(), conf);
trash.deleteCheckpoint();
trash.createCheckpoint();
} catch (IOException e) {
LOG.warn("Trash caught: "+e+". Skipping "+home.getPath()+".");
}
}
}
} catch (Exception e) {
LOG.warn("RuntimeException during Trash.Emptier.run(): ", e);
}
}
try {
fs.close();
} catch(IOException e) {
LOG.warn("Trash cannot close FileSystem: ", e);
}
}
private long ceiling(long time, long interval) {
return floor(time, interval) + interval;
}
private long floor(long time, long interval) {
return (time / interval) * interval;
}
}
private long getTimeFromCheckpoint(String name) throws ParseException {
long time;
try {
synchronized (CHECKPOINT) {
time = CHECKPOINT.parse(name).getTime();
}
} catch (ParseException pe) {
// Check for old-style checkpoint directories left over
// after an upgrade from Hadoop 1.x
synchronized (OLD_CHECKPOINT) {
time = OLD_CHECKPOINT.parse(name).getTime();
}
}
return time;
}
}
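// A minimal usage sketch, not part of the original file: it wires up the
// default policy by hand much as the Trash facade does. The scratch path is
// hypothetical, and fs.trash.interval must be non-zero or moveToTrash()
// returns false immediately.
class TrashPolicyDefaultExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.setFloat(FS_TRASH_INTERVAL_KEY, 60f);   // deletion interval, minutes
    FileSystem fs = FileSystem.getLocal(conf);
    TrashPolicy policy = new TrashPolicyDefault();
    policy.initialize(conf, fs, fs.getHomeDirectory());
    // Trashed files land under <home>/.Trash/Current/<original path>.
    boolean moved = policy.moveToTrash(new Path("/tmp/scratch/file.txt"));
    System.out.println(moved
        ? "moved under " + policy.getCurrentTrashDir()
        : "trash disabled or path already in trash");
  }
}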
| 11,200 | 32.636637 | 102 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.ByteBufferPool;
import com.google.common.base.Preconditions;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public final class ByteBufferUtil {
/**
* Determine if a stream can do a byte buffer read via read(ByteBuffer buf)
*/
private static boolean streamHasByteBufferRead(InputStream stream) {
if (!(stream instanceof ByteBufferReadable)) {
return false;
}
if (!(stream instanceof FSDataInputStream)) {
return true;
}
return ((FSDataInputStream)stream).getWrappedStream()
instanceof ByteBufferReadable;
}
/**
* Perform a fallback read.
*/
public static ByteBuffer fallbackRead(
InputStream stream, ByteBufferPool bufferPool, int maxLength)
throws IOException {
if (bufferPool == null) {
throw new UnsupportedOperationException("zero-copy reads " +
"were not available, and you did not provide a fallback " +
"ByteBufferPool.");
}
boolean useDirect = streamHasByteBufferRead(stream);
ByteBuffer buffer = bufferPool.getBuffer(useDirect, maxLength);
if (buffer == null) {
throw new UnsupportedOperationException("zero-copy reads " +
"were not available, and the ByteBufferPool did not provide " +
"us with " + (useDirect ? "a direct" : "an indirect") +
"buffer.");
}
Preconditions.checkState(buffer.capacity() > 0);
Preconditions.checkState(buffer.isDirect() == useDirect);
maxLength = Math.min(maxLength, buffer.capacity());
boolean success = false;
try {
if (useDirect) {
buffer.clear();
buffer.limit(maxLength);
ByteBufferReadable readable = (ByteBufferReadable)stream;
int totalRead = 0;
while (true) {
if (totalRead >= maxLength) {
success = true;
break;
}
int nRead = readable.read(buffer);
if (nRead < 0) {
if (totalRead > 0) {
success = true;
}
break;
}
totalRead += nRead;
}
buffer.flip();
} else {
buffer.clear();
int nRead = stream.read(buffer.array(),
buffer.arrayOffset(), maxLength);
if (nRead >= 0) {
buffer.limit(nRead);
success = true;
}
}
} finally {
if (!success) {
// If we got an error while reading, or if we are at EOF, we
// don't need the buffer any more. We can give it back to the
// bufferPool.
bufferPool.putBuffer(buffer);
buffer = null;
}
}
return buffer;
}
}
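// A minimal usage sketch, not part of the original file: a fallback read of
// up to 4096 bytes into a pooled buffer. It uses ElasticByteBufferPool from
// org.apache.hadoop.io; the input path is hypothetical. A null return means
// the stream was already at EOF.
class FallbackReadExample {
  public static void main(String[] args) throws IOException {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    FSDataInputStream in =
        FileSystem.getLocal(conf).open(new Path("/tmp/example.dat"));
    ByteBufferPool pool = new org.apache.hadoop.io.ElasticByteBufferPool();
    ByteBuffer buf = ByteBufferUtil.fallbackRead(in, pool, 4096);
    if (buf != null) {
      System.out.println("read " + buf.remaining() + " bytes");
      pool.putBuffer(buf);      // hand the buffer back once consumed
    }
    in.close();
  }
}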
| 3,698 | 31.447368 | 77 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegationTokenRenewer.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
import java.lang.ref.WeakReference;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Time;
/**
* A daemon thread that waits for the next file system to renew.
*/
@InterfaceAudience.Private
public class DelegationTokenRenewer
extends Thread {
private static final Log LOG = LogFactory
.getLog(DelegationTokenRenewer.class);
/** The renewable interface used by the renewer. */
public interface Renewable {
/** @return the renew token. */
public Token<?> getRenewToken();
/** Set delegation token. */
public <T extends TokenIdentifier> void setDelegationToken(Token<T> token);
}
/**
* An action that will renew and replace the file system's delegation
* tokens automatically.
*/
public static class RenewAction<T extends FileSystem & Renewable>
implements Delayed {
/** when should the renew happen */
private long renewalTime;
/** a weak reference to the file system so that it can be garbage collected */
private final WeakReference<T> weakFs;
private Token<?> token;
boolean isValid = true;
private RenewAction(final T fs) {
this.weakFs = new WeakReference<T>(fs);
this.token = fs.getRenewToken();
updateRenewalTime(renewCycle);
}
public boolean isValid() {
return isValid;
}
/** Get the delay until this event should happen. */
@Override
public long getDelay(final TimeUnit unit) {
final long millisLeft = renewalTime - Time.now();
return unit.convert(millisLeft, TimeUnit.MILLISECONDS);
}
@Override
public int compareTo(final Delayed delayed) {
final RenewAction<?> that = (RenewAction<?>)delayed;
return this.renewalTime < that.renewalTime? -1
: this.renewalTime == that.renewalTime? 0: 1;
}
@Override
public int hashCode() {
return token.hashCode();
}
@Override
public boolean equals(final Object that) {
if (this == that) {
return true;
} else if (that == null || !(that instanceof RenewAction)) {
return false;
}
return token.equals(((RenewAction<?>)that).token);
}
/**
* Set a new time for the renewal.
* It can only be called when the action is not in the queue or any
* collection because the hashCode may change.
* @param delay the renewal delay in milliseconds
*/
private void updateRenewalTime(long delay) {
renewalTime = Time.now() + delay - delay/10;
}
/**
* Renew or replace the delegation token for this file system.
* It can only be called when the action is not in the queue.
* @return true if the file system has not been garbage collected
* @throws IOException
*/
private boolean renew() throws IOException, InterruptedException {
final T fs = weakFs.get();
final boolean b = fs != null;
if (b) {
synchronized(fs) {
try {
long expires = token.renew(fs.getConf());
updateRenewalTime(expires - Time.now());
} catch (IOException ie) {
try {
Token<?>[] tokens = fs.addDelegationTokens(null, null);
if (tokens.length == 0) {
throw new IOException("addDelegationTokens returned no tokens");
}
token = tokens[0];
updateRenewalTime(renewCycle);
fs.setDelegationToken(token);
} catch (IOException ie2) {
isValid = false;
throw new IOException("Can't renew or get new delegation token ", ie);
}
}
}
}
return b;
}
private void cancel() throws IOException, InterruptedException {
final T fs = weakFs.get();
if (fs != null) {
token.cancel(fs.getConf());
}
}
@Override
public String toString() {
Renewable fs = weakFs.get();
return fs == null? "evaporated token renew"
: "The token will be renewed in " + getDelay(TimeUnit.SECONDS)
+ " secs, renewToken=" + token;
}
}
/** Assumes the renew cycle for a token is 24 hours. */
private static final long RENEW_CYCLE = 24 * 60 * 60 * 1000;
@InterfaceAudience.Private
@VisibleForTesting
public static long renewCycle = RENEW_CYCLE;
/** Queue to maintain the RenewActions to be processed by the {@link #run()} */
private volatile DelayQueue<RenewAction<?>> queue = new DelayQueue<RenewAction<?>>();
/** For testing purposes */
@VisibleForTesting
protected int getRenewQueueLength() {
return queue.size();
}
/**
* Create the singleton instance. However, the thread can be started lazily in
* {@link #addRenewAction(FileSystem)}
*/
private static DelegationTokenRenewer INSTANCE = null;
private DelegationTokenRenewer(final Class<? extends FileSystem> clazz) {
super(clazz.getSimpleName() + "-" + DelegationTokenRenewer.class.getSimpleName());
setDaemon(true);
}
public static synchronized DelegationTokenRenewer getInstance() {
if (INSTANCE == null) {
INSTANCE = new DelegationTokenRenewer(FileSystem.class);
}
return INSTANCE;
}
@VisibleForTesting
static synchronized void reset() {
if (INSTANCE != null) {
INSTANCE.queue.clear();
INSTANCE.interrupt();
try {
INSTANCE.join();
} catch (InterruptedException e) {
LOG.warn("Failed to reset renewer");
} finally {
INSTANCE = null;
}
}
}
/** Add a renew action to the queue. */
@SuppressWarnings("static-access")
public <T extends FileSystem & Renewable> RenewAction<T> addRenewAction(final T fs) {
synchronized (this) {
if (!isAlive()) {
start();
}
}
RenewAction<T> action = new RenewAction<T>(fs);
if (action.token != null) {
queue.add(action);
} else {
fs.LOG.error("does not have a token for renewal");
}
return action;
}
/**
* Remove the associated renew action from the queue
*
* @throws IOException
*/
public <T extends FileSystem & Renewable> void removeRenewAction(
final T fs) throws IOException {
RenewAction<T> action = new RenewAction<T>(fs);
if (queue.remove(action)) {
try {
action.cancel();
} catch (InterruptedException ie) {
LOG.error("Interrupted while canceling token for " + fs.getUri()
+ "filesystem");
if (LOG.isDebugEnabled()) {
LOG.debug(ie.getStackTrace());
}
}
}
}
@SuppressWarnings("static-access")
@Override
public void run() {
for(;;) {
RenewAction<?> action = null;
try {
action = queue.take();
if (action.renew()) {
queue.add(action);
}
} catch (InterruptedException ie) {
return;
} catch (Exception ie) {
action.weakFs.get().LOG.warn("Failed to renew token, action=" + action,
ie);
}
}
}
}
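// A small sketch, not part of the original file, of the scheduling rule in
// updateRenewalTime(): the next renewal fires after delay - delay/10, i.e.
// at roughly 90% of the renew cycle, leaving headroom before the token
// actually expires.
class RenewScheduleExample {
  public static void main(String[] args) {
    long delay = 24 * 60 * 60 * 1000L;     // RENEW_CYCLE: 24 hours in ms
    long scheduled = delay - delay / 10;   // fire at the 90% mark
    System.out.println("renew after " + (scheduled / 60000L) + " minutes"); // 1296
  }
}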
| 8,155 | 29.095941 | 87 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;
import org.apache.hadoop.util.Progressable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLDecoder;
import java.util.*;
/**
* This is an implementation of the Hadoop Archive
* Filesystem. This archive Filesystem has index files
* of the form _index* and has contents of the form
* part-*. The index files store the indexes of the
* real files. The index files are of the form _masterindex
* and _index. The master index is a level of indirection
* into the index file to make lookups faster. The index
* file is sorted by the hash code of the paths that it contains,
* and the master index contains pointers to the positions in
* the index for ranges of hash codes.
*/
public class HarFileSystem extends FileSystem {
private static final Log LOG = LogFactory.getLog(HarFileSystem.class);
public static final String METADATA_CACHE_ENTRIES_KEY = "fs.har.metadatacache.entries";
public static final int METADATA_CACHE_ENTRIES_DEFAULT = 10;
public static final int VERSION = 3;
private static Map<URI, HarMetaData> harMetaCache;
// uri representation of this Har filesystem
private URI uri;
// the top level path of the archive
// in the underlying file system
private Path archivePath;
// the har auth
private String harAuth;
// pointer into the static metadata cache
private HarMetaData metadata;
private FileSystem fs;
/**
* Public, no-argument constructor for HarFileSystem.
*/
public HarFileSystem() {
// Must call #initialize() method to set the underlying file system
}
/**
* Return the protocol scheme for the FileSystem.
* <p/>
*
* @return <code>har</code>
*/
@Override
public String getScheme() {
return "har";
}
/**
* Constructor to create a HarFileSystem with an
* underlying filesystem.
* @param fs underlying file system
*/
public HarFileSystem(FileSystem fs) {
this.fs = fs;
this.statistics = fs.statistics;
}
private synchronized void initializeMetadataCache(Configuration conf) {
if (harMetaCache == null) {
int cacheSize = conf.getInt(METADATA_CACHE_ENTRIES_KEY, METADATA_CACHE_ENTRIES_DEFAULT);
harMetaCache = Collections.synchronizedMap(new LruCache<URI, HarMetaData>(cacheSize));
}
}
/**
* Initialize a Har filesystem per har archive. The
* archive home directory is the top level directory
* in the filesystem that contains the HAR archive.
* Be careful with this method, you do not want to go
* on creating new Filesystem instances per call to
* path.getFileSystem().
* The URI of a Har filesystem is either
* har://underlyingfsscheme-host:port/archivepath or
* har:///archivepath, in which case the default filesystem
* from the configuration is used as the underlying filesystem.
*/
@Override
public void initialize(URI name, Configuration conf) throws IOException {
// initialize the metadata cache, if needed
initializeMetadataCache(conf);
// decode the name
URI underLyingURI = decodeHarURI(name, conf);
// we got the right har Path; now check if this is
// truly a har filesystem
Path harPath = archivePath(
new Path(name.getScheme(), name.getAuthority(), name.getPath()));
if (harPath == null) {
throw new IOException("Invalid path for the Har Filesystem. " +
name.toString());
}
if (fs == null) {
fs = FileSystem.get(underLyingURI, conf);
}
uri = harPath.toUri();
archivePath = new Path(uri.getPath());
harAuth = getHarAuth(underLyingURI);
//check for the underlying fs containing
// the index file
Path masterIndexPath = new Path(archivePath, "_masterindex");
Path archiveIndexPath = new Path(archivePath, "_index");
if (!fs.exists(masterIndexPath) || !fs.exists(archiveIndexPath)) {
throw new IOException("Invalid path for the Har Filesystem. " +
"No index file in " + harPath);
}
metadata = harMetaCache.get(uri);
if (metadata != null) {
FileStatus mStat = fs.getFileStatus(masterIndexPath);
FileStatus aStat = fs.getFileStatus(archiveIndexPath);
if (mStat.getModificationTime() != metadata.getMasterIndexTimestamp() ||
aStat.getModificationTime() != metadata.getArchiveIndexTimestamp()) {
// the archive has been overwritten since we last read it
// remove the entry from the meta data cache
metadata = null;
harMetaCache.remove(uri);
}
}
if (metadata == null) {
metadata = new HarMetaData(fs, masterIndexPath, archiveIndexPath);
metadata.parseMetaData();
harMetaCache.put(uri, metadata);
}
}
@Override
public Configuration getConf() {
return fs.getConf();
}
// get the version of the filesystem from the masterindex file
// the version is currently not useful since it's the first version
// of archives
public int getHarVersion() throws IOException {
if (metadata != null) {
return metadata.getVersion();
}
else {
throw new IOException("Invalid meta data for the Har Filesystem");
}
}
/*
* find the parent path that is the
* archive path in the path. The last
* path segment that ends with .har is
* the path that will be returned.
*/
private Path archivePath(Path p) {
Path retPath = null;
Path tmp = p;
for (int i=0; i< p.depth(); i++) {
if (tmp.toString().endsWith(".har")) {
retPath = tmp;
break;
}
tmp = tmp.getParent();
}
return retPath;
}
/**
* decode the raw URI to get the underlying URI
* @param rawURI raw Har URI
* @param conf configuration used to resolve the default filesystem
* @return filtered URI of the underlying fileSystem
*/
private URI decodeHarURI(URI rawURI, Configuration conf) throws IOException {
String tmpAuth = rawURI.getAuthority();
// we are using the default filesystem in the config,
// so create an underlying uri and return it
if (tmpAuth == null) {
//create a path
return FileSystem.getDefaultUri(conf);
}
String authority = rawURI.getAuthority();
int i = authority.indexOf('-');
if (i < 0) {
throw new IOException("URI: " + rawURI
+ " is an invalid Har URI since '-' not found."
+ " Expecting har://<scheme>-<host>/<path>.");
}
if (rawURI.getQuery() != null) {
// query component not allowed
throw new IOException("query component in Path not supported " + rawURI);
}
URI tmp;
try {
// convert <scheme>-<host> to <scheme>://<host>
URI baseUri = new URI(authority.replaceFirst("-", "://"));
tmp = new URI(baseUri.getScheme(), baseUri.getAuthority(),
rawURI.getPath(), rawURI.getQuery(), rawURI.getFragment());
} catch (URISyntaxException e) {
throw new IOException("URI: " + rawURI
+ " is an invalid Har URI. Expecting har://<scheme>-<host>/<path>.");
}
return tmp;
}
private static String decodeString(String str)
throws UnsupportedEncodingException {
return URLDecoder.decode(str, "UTF-8");
}
private String decodeFileName(String fname)
throws UnsupportedEncodingException {
int version = metadata.getVersion();
if (version == 2 || version == 3){
return decodeString(fname);
}
return fname;
}
/**
* return the top level archive.
*/
@Override
public Path getWorkingDirectory() {
return new Path(uri.toString());
}
@Override
public Path getInitialWorkingDirectory() {
return getWorkingDirectory();
}
@Override
public FsStatus getStatus(Path p) throws IOException {
return fs.getStatus(p);
}
/**
* Create a har specific auth
* har-underlyingfs:port
* @param underLyingUri the uri of underlying
* filesystem
* @return har specific auth
*/
private String getHarAuth(URI underLyingUri) {
String auth = underLyingUri.getScheme() + "-";
if (underLyingUri.getHost() != null) {
if (underLyingUri.getUserInfo() != null) {
auth += underLyingUri.getUserInfo();
auth += "@";
}
auth += underLyingUri.getHost();
if (underLyingUri.getPort() != -1) {
auth += ":";
auth += underLyingUri.getPort();
}
}
else {
auth += ":";
}
return auth;
}
/**
* Used for delegation token related functionality. Must delegate to
* underlying file system.
*/
@Override
protected URI getCanonicalUri() {
return fs.getCanonicalUri();
}
@Override
protected URI canonicalizeUri(URI uri) {
return fs.canonicalizeUri(uri);
}
/**
* Returns the uri of this filesystem.
* The uri is of the form
* har://underlyingfsschema-host:port/pathintheunderlyingfs
*/
@Override
public URI getUri() {
return this.uri;
}
@Override
protected void checkPath(Path path) {
fs.checkPath(path);
}
@Override
public Path resolvePath(Path p) throws IOException {
return fs.resolvePath(p);
}
/**
* this method returns the relative path
* inside the har filesystem.
* @param path the fully qualified path in the har filesystem.
* @return relative path in the filesystem.
*/
private Path getPathInHar(Path path) {
Path harPath = new Path(path.toUri().getPath());
if (archivePath.compareTo(harPath) == 0)
return new Path(Path.SEPARATOR);
Path tmp = new Path(harPath.getName());
Path parent = harPath.getParent();
while (!(parent.compareTo(archivePath) == 0)) {
if (parent.toString().equals(Path.SEPARATOR)) {
tmp = null;
break;
}
tmp = new Path(parent.getName(), tmp);
parent = parent.getParent();
}
if (tmp != null)
tmp = new Path(Path.SEPARATOR, tmp);
return tmp;
}
// the relative path of p: basically getting rid of the leading /.
// Parsing and doing string manipulation is not good, so
// just use the Path API to do it.
private Path makeRelative(String initial, Path p) {
String scheme = this.uri.getScheme();
String authority = this.uri.getAuthority();
Path root = new Path(Path.SEPARATOR);
if (root.compareTo(p) == 0)
return new Path(scheme, authority, initial);
Path retPath = new Path(p.getName());
Path parent = p.getParent();
for (int i=0; i < p.depth()-1; i++) {
retPath = new Path(parent.getName(), retPath);
parent = parent.getParent();
}
return new Path(new Path(scheme, authority, initial),
retPath.toString());
}
/* this makes a path qualified in the har filesystem
* (non-Javadoc)
* @see org.apache.hadoop.fs.FilterFileSystem#makeQualified(
* org.apache.hadoop.fs.Path)
*/
@Override
public Path makeQualified(Path path) {
// make sure that we just get the
// path component
Path fsPath = path;
if (!path.isAbsolute()) {
fsPath = new Path(archivePath, path);
}
URI tmpURI = fsPath.toUri();
//change this to Har uri
return new Path(uri.getScheme(), harAuth, tmpURI.getPath());
}
/**
* Fix offset and length of block locations.
* Note that this method modifies the original array.
* @param locations block locations of har part file
* @param start the start of the desired range in the contained file
* @param len the length of the desired range
* @param fileOffsetInHar the offset of the desired file in the har part file
* @return block locations with fixed offset and length
*/
static BlockLocation[] fixBlockLocations(BlockLocation[] locations,
long start,
long len,
long fileOffsetInHar) {
// offset 1 past last byte of desired range
long end = start + len;
for (BlockLocation location : locations) {
// offset of part block relative to beginning of desired file
// (may be negative if file starts in this part block)
long harBlockStart = location.getOffset() - fileOffsetInHar;
// offset 1 past last byte of har block relative to beginning of
// desired file
long harBlockEnd = harBlockStart + location.getLength();
if (start > harBlockStart) {
// desired range starts after beginning of this har block
// fix offset to beginning of relevant range (relative to desired file)
location.setOffset(start);
// fix length to relevant portion of har block
location.setLength(location.getLength() - (start - harBlockStart));
} else {
// desired range includes beginning of this har block
location.setOffset(harBlockStart);
}
if (harBlockEnd > end) {
// range ends before end of this har block
// fix length to remove irrelevant portion at the end
location.setLength(location.getLength() - (harBlockEnd - end));
}
}
return locations;
}
/**
* Get block locations from the underlying fs and fix their
* offsets and lengths.
* @param file the input file status to get block locations
* @param start the start of the desired range in the contained file
* @param len the length of the desired range
* @return block locations for this segment of file
* @throws IOException
*/
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
long len) throws IOException {
HarStatus hstatus = getFileHarStatus(file.getPath());
Path partPath = new Path(archivePath, hstatus.getPartName());
FileStatus partStatus = metadata.getPartFileStatus(partPath);
// get all part blocks that overlap with the desired file blocks
BlockLocation[] locations =
fs.getFileBlockLocations(partStatus,
hstatus.getStartIndex() + start, len);
return fixBlockLocations(locations, start, len, hstatus.getStartIndex());
}
/**
* the hash of the path p inside the filesystem
* @param p the path in the harfilesystem
* @return the hash code of the path.
*/
public static int getHarHash(Path p) {
return (p.toString().hashCode() & 0x7fffffff);
}
static class Store {
public Store(long begin, long end) {
this.begin = begin;
this.end = end;
}
public long begin;
public long end;
}
/**
* Get filestatuses of all the children of a given directory. This just reads
* through index file and reads line by line to get all statuses for children
* of a directory. It's a brute-force way of getting all such filestatuses.
*
* @param parent
* the parent path directory
* @param statuses
* the list to add the children filestatuses to
*/
private void fileStatusesInIndex(HarStatus parent, List<FileStatus> statuses)
throws IOException {
String parentString = parent.getName();
if (!parentString.endsWith(Path.SEPARATOR)){
parentString += Path.SEPARATOR;
}
Path harPath = new Path(parentString);
int harlen = harPath.depth();
final Map<String, FileStatus> cache = new TreeMap<String, FileStatus>();
for (HarStatus hstatus : metadata.archive.values()) {
String child = hstatus.getName();
if ((child.startsWith(parentString))) {
Path thisPath = new Path(child);
if (thisPath.depth() == harlen + 1) {
statuses.add(toFileStatus(hstatus, cache));
}
}
}
}
/**
* Combine the status stored in the index and the underlying status.
* @param h status stored in the index
* @param cache caching the underlying file statuses
* @return the combined file status
* @throws IOException
*/
private FileStatus toFileStatus(HarStatus h,
Map<String, FileStatus> cache) throws IOException {
FileStatus underlying = null;
if (cache != null) {
underlying = cache.get(h.partName);
}
if (underlying == null) {
final Path p = h.isDir? archivePath: new Path(archivePath, h.partName);
underlying = fs.getFileStatus(p);
if (cache != null) {
cache.put(h.partName, underlying);
}
}
long modTime = 0;
int version = metadata.getVersion();
if (version < 3) {
modTime = underlying.getModificationTime();
} else if (version == 3) {
modTime = h.getModificationTime();
}
return new FileStatus(
h.isDir()? 0L: h.getLength(),
h.isDir(),
underlying.getReplication(),
underlying.getBlockSize(),
modTime,
underlying.getAccessTime(),
underlying.getPermission(),
underlying.getOwner(),
underlying.getGroup(),
makeRelative(this.uri.getPath(), new Path(h.name)));
}
// a single line parser for hadoop archives status
// stored in a single line in the index files
// the format is of the form
// filename "dir"/"file" partFileName startIndex length
// <space separated children>
private class HarStatus {
boolean isDir;
String name;
List<String> children;
String partName;
long startIndex;
long length;
long modificationTime = 0;
public HarStatus(String harString) throws UnsupportedEncodingException {
String[] splits = harString.split(" ");
this.name = decodeFileName(splits[0]);
this.isDir = "dir".equals(splits[1]);
// this is equal to "none" if its a directory
this.partName = splits[2];
this.startIndex = Long.parseLong(splits[3]);
this.length = Long.parseLong(splits[4]);
int version = metadata.getVersion();
String[] propSplits = null;
// propSplits is used to retrieve the metainformation that Har versions
// 1 & 2 missed (modification time, permission, owner group).
// These fields are stored in an encoded string placed in different
// locations depending on whether it's a file or directory entry.
// If it's a directory, the string will be placed at the partName
// location (directories have no partName because they don't have data
// to be stored). This is done because the number of fields in a
// directory entry is unbounded (all children are listed at the end)
// If it's a file, the string will be the last field.
if (isDir) {
if (version == 3){
propSplits = decodeString(this.partName).split(" ");
}
children = new ArrayList<String>();
for (int i = 5; i < splits.length; i++) {
children.add(decodeFileName(splits[i]));
}
} else if (version == 3) {
propSplits = decodeString(splits[5]).split(" ");
}
if (propSplits != null && propSplits.length >= 4) {
modificationTime = Long.parseLong(propSplits[0]);
// the fields below are stored in the file but are currently not used
// by HarFileSystem
// permission = new FsPermission(Short.parseShort(propSplits[1]));
// owner = decodeString(propSplits[2]);
// group = decodeString(propSplits[3]);
}
}
public boolean isDir() {
return isDir;
}
public String getName() {
return name;
}
public String getPartName() {
return partName;
}
public long getStartIndex() {
return startIndex;
}
public long getLength() {
return length;
}
public long getModificationTime() {
return modificationTime;
}
}
/**
* return the filestatus of files in har archive.
* The permission returned are that of the archive
* index files. The permissions are not persisted
* while creating a hadoop archive.
* @param f the path in har filesystem
* @return filestatus.
* @throws IOException
*/
@Override
public FileStatus getFileStatus(Path f) throws IOException {
HarStatus hstatus = getFileHarStatus(f);
return toFileStatus(hstatus, null);
}
private HarStatus getFileHarStatus(Path f) throws IOException {
// get the fs DataInputStream for the underlying file
// look up the index.
Path p = makeQualified(f);
Path harPath = getPathInHar(p);
if (harPath == null) {
throw new IOException("Invalid file name: " + f + " in " + uri);
}
HarStatus hstatus = metadata.archive.get(harPath);
if (hstatus == null) {
throw new FileNotFoundException("File: " + f + " does not exist in " + uri);
}
return hstatus;
}
/**
* @return null since no checksum algorithm is implemented.
*/
@Override
public FileChecksum getFileChecksum(Path f, long length) {
return null;
}
/**
* Returns a har input stream which fakes end of
* file. It reads the index files to get the part
* file name and the size and start of the file.
*/
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
// get the fs DataInputStream for the underlying file
HarStatus hstatus = getFileHarStatus(f);
if (hstatus.isDir()) {
throw new FileNotFoundException(f + " : not a file in " +
archivePath);
}
return new HarFSDataInputStream(fs, new Path(archivePath,
hstatus.getPartName()),
hstatus.getStartIndex(), hstatus.getLength(), bufferSize);
}
/**
* Used for delegation token related functionality. Must delegate to
* underlying file system.
*/
@Override
public FileSystem[] getChildFileSystems() {
return new FileSystem[]{fs};
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
throw new IOException("Har: create not allowed.");
}
@SuppressWarnings("deprecation")
@Override
public FSDataOutputStream createNonRecursive(Path f, boolean overwrite,
int bufferSize, short replication, long blockSize, Progressable progress)
throws IOException {
throw new IOException("Har: create not allowed.");
}
@Override
public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
throw new IOException("Har: append not allowed.");
}
@Override
public void close() throws IOException {
super.close();
if (fs != null) {
try {
fs.close();
} catch(IOException ie) {
//this might already be closed
// ignore
}
}
}
/**
* Not implemented.
*/
@Override
public boolean setReplication(Path src, short replication) throws IOException{
throw new IOException("Har: setReplication not allowed");
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
throw new IOException("Har: rename not allowed");
}
@Override
public FSDataOutputStream append(Path f) throws IOException {
throw new IOException("Har: append not allowed");
}
/**
* Not implemented.
*/
@Override
public boolean truncate(Path f, long newLength) throws IOException {
throw new IOException("Har: truncate not allowed");
}
/**
* Not implemented.
*/
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
throw new IOException("Har: delete not allowed");
}
/**
* liststatus returns the children of a directory
* after looking up the index files.
*/
@Override
public FileStatus[] listStatus(Path f) throws IOException {
//need to see if the file is an index file
//get the filestatus of the archive directory
// we will create fake filestatuses to return
// to the client
List<FileStatus> statuses = new ArrayList<FileStatus>();
Path tmpPath = makeQualified(f);
Path harPath = getPathInHar(tmpPath);
HarStatus hstatus = metadata.archive.get(harPath);
if (hstatus == null) {
throw new FileNotFoundException("File " + f + " not found in " + archivePath);
}
if (hstatus.isDir()) {
fileStatusesInIndex(hstatus, statuses);
} else {
statuses.add(toFileStatus(hstatus, null));
}
return statuses.toArray(new FileStatus[statuses.size()]);
}
/**
* return the top level archive path.
*/
@Override
public Path getHomeDirectory() {
return new Path(uri.toString());
}
@Override
public void setWorkingDirectory(Path newDir) {
//does nothing.
}
/**
* not implemented.
*/
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
throw new IOException("Har: mkdirs not allowed");
}
/**
* not implemented.
*/
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
Path src, Path dst) throws IOException {
throw new IOException("Har: copyfromlocalfile not allowed");
}
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
Path[] srcs, Path dst) throws IOException {
throw new IOException("Har: copyfromlocalfile not allowed");
}
/**
* copies the file in the har filesystem to a local file.
*/
@Override
public void copyToLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
FileUtil.copy(this, src, getLocal(getConf()), dst, false, getConf());
}
/**
* not implemented.
*/
@Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
throw new IOException("Har: startLocalOutput not allowed");
}
/**
* not implemented.
*/
@Override
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
throw new IOException("Har: completeLocalOutput not allowed");
}
/**
* not implemented.
*/
@Override
public void setOwner(Path p, String username, String groupname)
throws IOException {
throw new IOException("Har: setowner not allowed");
}
@Override
public void setTimes(Path p, long mtime, long atime) throws IOException {
throw new IOException("Har: setTimes not allowed");
}
/**
* Not implemented.
*/
@Override
public void setPermission(Path p, FsPermission permission)
throws IOException {
throw new IOException("Har: setPermission not allowed");
}
/**
* Hadoop archives input stream. This input stream fakes EOF
* since archive files are part of bigger part files.
*/
private static class HarFSDataInputStream extends FSDataInputStream {
/**
* Create an input stream that fakes all the reads/positions/seeking.
*/
private static class HarFsInputStream extends FSInputStream
implements CanSetDropBehind, CanSetReadahead {
private long position, start, end;
//The underlying data input stream that the
// underlying filesystem will return.
private final FSDataInputStream underLyingStream;
//one byte buffer
private final byte[] oneBytebuff = new byte[1];
HarFsInputStream(FileSystem fs, Path path, long start,
long length, int bufferSize) throws IOException {
if (length < 0) {
throw new IllegalArgumentException("Negative length ["+length+"]");
}
underLyingStream = fs.open(path, bufferSize);
underLyingStream.seek(start);
// the start of this file in the part file
this.start = start;
// the position pointer in the part file
this.position = start;
// the end pointer in the part file
this.end = start + length;
}
@Override
public synchronized int available() throws IOException {
long remaining = end - underLyingStream.getPos();
if (remaining > Integer.MAX_VALUE) {
return Integer.MAX_VALUE;
}
return (int) remaining;
}
@Override
public synchronized void close() throws IOException {
underLyingStream.close();
super.close();
}
//not implemented
@Override
public void mark(int readLimit) {
// do nothing
}
/**
* reset is not implemented
*/
@Override
public void reset() throws IOException {
throw new IOException("reset not implemented.");
}
@Override
public synchronized int read() throws IOException {
int ret = read(oneBytebuff, 0, 1);
return (ret <= 0) ? -1: (oneBytebuff[0] & 0xff);
}
// NB: currently this method is actually never executed because
// java.io.DataInputStream.read(byte[]) directly delegates to
// method java.io.InputStream.read(byte[], int, int).
// However, potentially it can be invoked, so leave it intact for now.
@Override
public synchronized int read(byte[] b) throws IOException {
final int ret = read(b, 0, b.length);
return ret;
}
/**
* Read up to len bytes, never reading past the faked end of the file.
*/
@Override
public synchronized int read(byte[] b, int offset, int len)
throws IOException {
int newlen = len;
int ret = -1;
if (position + len > end) {
newlen = (int) (end - position);
}
// end case
if (newlen == 0)
return ret;
ret = underLyingStream.read(b, offset, newlen);
position += ret;
return ret;
}
@Override
public synchronized long skip(long n) throws IOException {
long tmpN = n;
if (tmpN > 0) {
final long actualRemaining = end - position;
if (tmpN > actualRemaining) {
tmpN = actualRemaining;
}
underLyingStream.seek(tmpN + position);
position += tmpN;
return tmpN;
}
// NB: the contract is described in java.io.InputStream.skip(long):
// this method returns the number of bytes actually skipped, so,
// the return value should never be negative.
return 0;
}
@Override
public synchronized long getPos() throws IOException {
return (position - start);
}
@Override
public synchronized void seek(final long pos) throws IOException {
validatePosition(pos);
position = start + pos;
underLyingStream.seek(position);
}
private void validatePosition(final long pos) throws IOException {
if (pos < 0) {
throw new IOException("Negative position: "+pos);
}
final long length = end - start;
if (pos > length) {
throw new IOException("Position behind the end " +
"of the stream (length = "+length+"): " + pos);
}
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
// do not need to implement this
// hdfs in itself does seektonewsource
// while reading.
return false;
}
/**
* implementing position readable.
*/
@Override
public int read(long pos, byte[] b, int offset, int length)
throws IOException {
int nlength = length;
if (start + nlength + pos > end) {
// length corrected to the real remaining length:
nlength = (int) (end - start - pos);
}
if (nlength <= 0) {
// EOS:
return -1;
}
return underLyingStream.read(pos + start , b, offset, nlength);
}
/**
* position readable again.
*/
@Override
public void readFully(long pos, byte[] b, int offset, int length)
throws IOException {
if (start + length + pos > end) {
throw new IOException("Not enough bytes to read.");
}
underLyingStream.readFully(pos + start, b, offset, length);
}
@Override
public void readFully(long pos, byte[] b) throws IOException {
readFully(pos, b, 0, b.length);
}
@Override
public void setReadahead(Long readahead) throws IOException {
underLyingStream.setReadahead(readahead);
}
@Override
public void setDropBehind(Boolean dropBehind) throws IOException {
underLyingStream.setDropBehind(dropBehind);
}
}
/**
* constructors for har input stream.
* @param fs the underlying filesystem
* @param p The path in the underlying filesystem
* @param start the start position in the part file
* @param length the length of valid data in the part file
* @param bufsize the buffer size
* @throws IOException
*/
public HarFSDataInputStream(FileSystem fs, Path p, long start,
long length, int bufsize) throws IOException {
super(new HarFsInputStream(fs, p, start, length, bufsize));
}
}
private class HarMetaData {
private FileSystem fs;
private int version;
// the masterIndex of the archive
private Path masterIndexPath;
// the index file
private Path archiveIndexPath;
private long masterIndexTimestamp;
private long archiveIndexTimestamp;
List<Store> stores = new ArrayList<Store>();
Map<Path, HarStatus> archive = new HashMap<Path, HarStatus>();
private Map<Path, FileStatus> partFileStatuses = new HashMap<Path, FileStatus>();
public HarMetaData(FileSystem fs, Path masterIndexPath, Path archiveIndexPath) {
this.fs = fs;
this.masterIndexPath = masterIndexPath;
this.archiveIndexPath = archiveIndexPath;
}
public FileStatus getPartFileStatus(Path partPath) throws IOException {
FileStatus status;
status = partFileStatuses.get(partPath);
if (status == null) {
status = fs.getFileStatus(partPath);
partFileStatuses.put(partPath, status);
}
return status;
}
public long getMasterIndexTimestamp() {
return masterIndexTimestamp;
}
public long getArchiveIndexTimestamp() {
return archiveIndexTimestamp;
}
private int getVersion() {
return version;
}
private void parseMetaData() throws IOException {
Text line = new Text();
long read;
FSDataInputStream in = null;
LineReader lin = null;
try {
in = fs.open(masterIndexPath);
FileStatus masterStat = fs.getFileStatus(masterIndexPath);
masterIndexTimestamp = masterStat.getModificationTime();
lin = new LineReader(in, getConf());
read = lin.readLine(line);
// the first line contains the version of the index file
String versionLine = line.toString();
String[] arr = versionLine.split(" ");
version = Integer.parseInt(arr[0]);
// make it always backwards-compatible
if (this.version > HarFileSystem.VERSION) {
throw new IOException("Invalid version " +
this.version + " expected " + HarFileSystem.VERSION);
}
// each line contains a hashcode range and the index file name
String[] readStr;
while(read < masterStat.getLen()) {
int b = lin.readLine(line);
read += b;
readStr = line.toString().split(" ");
stores.add(new Store(Long.parseLong(readStr[2]),
Long.parseLong(readStr[3])));
line.clear();
}
} catch (IOException ioe) {
LOG.warn("Encountered exception ", ioe);
throw ioe;
} finally {
IOUtils.cleanup(LOG, lin, in);
}
FSDataInputStream aIn = fs.open(archiveIndexPath);
try {
FileStatus archiveStat = fs.getFileStatus(archiveIndexPath);
archiveIndexTimestamp = archiveStat.getModificationTime();
LineReader aLin;
// now start reading the real index file
for (Store s: stores) {
read = 0;
aIn.seek(s.begin);
aLin = new LineReader(aIn, getConf());
while (read + s.begin < s.end) {
int tmp = aLin.readLine(line);
read += tmp;
String lineFeed = line.toString();
String[] parsed = lineFeed.split(" ");
parsed[0] = decodeFileName(parsed[0]);
archive.put(new Path(parsed[0]), new HarStatus(lineFeed));
line.clear();
}
}
} finally {
IOUtils.cleanup(LOG, aIn);
}
}
}
/*
* testing purposes only:
*/
HarMetaData getMetadata() {
return metadata;
}
private static class LruCache<K, V> extends LinkedHashMap<K, V> {
private final int MAX_ENTRIES;
public LruCache(int maxEntries) {
super(maxEntries + 1, 1.0f, true);
MAX_ENTRIES = maxEntries;
}
@Override
protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
return size() > MAX_ENTRIES;
}
}
@SuppressWarnings("deprecation")
@Override
public FsServerDefaults getServerDefaults() throws IOException {
return fs.getServerDefaults();
}
@Override
public FsServerDefaults getServerDefaults(Path f) throws IOException {
return fs.getServerDefaults(f);
}
@Override
public long getUsed() throws IOException{
return fs.getUsed();
}
@SuppressWarnings("deprecation")
@Override
public long getDefaultBlockSize() {
return fs.getDefaultBlockSize();
}
@SuppressWarnings("deprecation")
@Override
public long getDefaultBlockSize(Path f) {
return fs.getDefaultBlockSize(f);
}
@SuppressWarnings("deprecation")
@Override
public short getDefaultReplication() {
return fs.getDefaultReplication();
}
@Override
public short getDefaultReplication(Path f) {
return fs.getDefaultReplication(f);
}
}
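// A minimal usage sketch, not part of the original file: listing and opening
// files through a HAR archive. The archive path and user name are
// hypothetical; har:///... leaves the authority empty, so decodeHarURI()
// falls back to the configured default filesystem.
class HarReadExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path dirInHar = new Path("har:///user/alice/logs.har/2014");
    FileSystem harFs = dirInHar.getFileSystem(conf);  // a HarFileSystem
    for (FileStatus st : harFs.listStatus(dirInHar)) {
      System.out.println(st.getPath() + " " + st.getLen());
    }
    // open() fakes EOF at the boundary of the archived file inside part-*
    FSDataInputStream in =
        harFs.open(new Path("har:///user/alice/logs.har/2014/part-00000"));
    in.close();
  }
}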
| 38,852 | 29.738133 | 102 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasFileDescriptor.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.FileDescriptor;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Having a FileDescriptor
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface HasFileDescriptor {
/**
* @return the FileDescriptor
* @throws IOException
*/
public FileDescriptor getFileDescriptor() throws IOException;
}
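// A minimal sketch, not part of the original file: a FileInputStream subclass
// that advertises its descriptor through this interface. Illustrative only.
class DescriptorExposingStream extends java.io.FileInputStream
    implements HasFileDescriptor {
  DescriptorExposingStream(java.io.File f) throws java.io.FileNotFoundException {
    super(f);
  }
  @Override
  public FileDescriptor getFileDescriptor() throws IOException {
    return getFD();  // delegate to FileInputStream's descriptor
  }
}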
| 1,284 | 30.341463 | 75 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathPermissionException.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
/**
* Exception corresponding to Operation Not Permitted - EPERM
*/
public class PathPermissionException extends PathIOException {
static final long serialVersionUID = 0L;
/** @param path for the exception */
public PathPermissionException(String path) {
super(path, "Operation not permitted");
}
}
| 1,150 | 38.689655 | 75 | java |

| hadoop | hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockStoragePolicySpi.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A storage policy specifies the placement of block replicas on specific
* storage types.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface BlockStoragePolicySpi {
/**
* Return the name of the storage policy. Policies are uniquely
* identified by name.
*
* @return the name of the storage policy.
*/
String getName();
/**
* Return the preferred storage types associated with this policy. These
* storage types are used sequentially for successive block replicas.
*
* @return preferred storage types used for placing block replicas.
*/
StorageType[] getStorageTypes();
/**
* Get the fallback storage types for creating new block replicas. Fallback
* storage types are used if the preferred storage types are not available.
*
   * @return fallback storage types for new block replicas.
*/
StorageType[] getCreationFallbacks();
/**
* Get the fallback storage types for replicating existing block replicas.
* Fallback storage types are used if the preferred storage types are not
* available.
*
* @return fallback storage types for replicating existing block replicas.
*/
StorageType[] getReplicationFallbacks();
/**
* Returns true if the policy is inherit-only and cannot be changed for
* an existing file.
*
* @return true if the policy is inherit-only.
*/
boolean isCopyOnCreateFile();
}
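// Minimal implementing sketch (illustrative only; the policy name and the
// StorageType constants used here are assumptions, not taken from this
// file):
//
//   class HotPolicy implements BlockStoragePolicySpi {
//     public String getName() { return "HOT"; }
//     public StorageType[] getStorageTypes() {
//       return new StorageType[] { StorageType.DISK };
//     }
//     public StorageType[] getCreationFallbacks() {
//       return new StorageType[0];
//     }
//     public StorageType[] getReplicationFallbacks() {
//       return new StorageType[] { StorageType.ARCHIVE };
//     }
//     public boolean isCopyOnCreateFile() { return false; }
//   }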
| 2,373 | 31.520548 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
/**
* Provides a trash facility which supports pluggable Trash policies.
*
* See the implementation of the configured TrashPolicy for more
* details.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Trash extends Configured {
private static final org.apache.commons.logging.Log LOG =
LogFactory.getLog(Trash.class);
private TrashPolicy trashPolicy; // configured trash policy instance
/**
* Construct a trash can accessor.
* @param conf a Configuration
*/
public Trash(Configuration conf) throws IOException {
this(FileSystem.get(conf), conf);
}
/**
* Construct a trash can accessor for the FileSystem provided.
* @param fs the FileSystem
* @param conf a Configuration
*/
public Trash(FileSystem fs, Configuration conf) throws IOException {
super(conf);
trashPolicy = TrashPolicy.getInstance(conf, fs, fs.getHomeDirectory());
}
/**
   * In case of symlinks or mount points, the item has to be moved to the
   * appropriate trash bin in the actual volume of the path p being deleted.
   *
   * Hence we get the file system of the fully-qualified resolved path and
   * then move the path p to the trash bin in that volume.
   * @param fs - the filesystem of path p
   * @param p - the path being deleted - to be moved to trash
* @param conf - configuration
* @return false if the item is already in the trash or trash is disabled
* @throws IOException on error
*/
public static boolean moveToAppropriateTrash(FileSystem fs, Path p,
Configuration conf) throws IOException {
Path fullyResolvedPath = fs.resolvePath(p);
FileSystem fullyResolvedFs =
FileSystem.get(fullyResolvedPath.toUri(), conf);
// If the trash interval is configured server side then clobber this
// configuration so that we always respect the server configuration.
try {
long trashInterval = fullyResolvedFs.getServerDefaults(
fullyResolvedPath).getTrashInterval();
if (0 != trashInterval) {
Configuration confCopy = new Configuration(conf);
confCopy.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY,
trashInterval);
conf = confCopy;
}
} catch (Exception e) {
// If we can not determine that trash is enabled server side then
// bail rather than potentially deleting a file when trash is enabled.
LOG.warn("Failed to get server trash configuration", e);
throw new IOException("Failed to get server trash configuration", e);
}
Trash trash = new Trash(fullyResolvedFs, conf);
boolean success = trash.moveToTrash(fullyResolvedPath);
if (success) {
System.out.println("Moved: '" + p + "' to trash at: " +
trash.getCurrentTrashDir() );
}
return success;
}
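  /**
   * Illustrative sketch (not part of the original class; the method name is
   * hypothetical): route a delete through the trash and fall back to a
   * permanent delete when the item could not be trashed.
   */
  private static boolean deleteViaTrashSketch(FileSystem fs, Path p,
      Configuration conf) throws IOException {
    if (moveToAppropriateTrash(fs, p, conf)) {
      return true; // moved into the trash bin of the resolved volume
    }
    // Trash disabled, or the item was already in the trash.
    return fs.delete(p, true);
  }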
/**
* Returns whether the trash is enabled for this filesystem
*/
public boolean isEnabled() {
return trashPolicy.isEnabled();
}
/** Move a file or directory to the current trash directory.
* @return false if the item is already in the trash or trash is disabled
*/
public boolean moveToTrash(Path path) throws IOException {
return trashPolicy.moveToTrash(path);
}
/** Create a trash checkpoint. */
public void checkpoint() throws IOException {
trashPolicy.createCheckpoint();
}
/** Delete old checkpoint(s). */
public void expunge() throws IOException {
trashPolicy.deleteCheckpoint();
}
  /** Get the current trash directory. */
Path getCurrentTrashDir() {
return trashPolicy.getCurrentTrashDir();
}
/** get the configured trash policy */
TrashPolicy getTrashPolicy() {
return trashPolicy;
}
/** Return a {@link Runnable} that periodically empties the trash of all
* users, intended to be run by the superuser.
*/
public Runnable getEmptier() throws IOException {
return trashPolicy.getEmptier();
}
}
| 5,011 | 33.805556 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import com.google.common.annotations.VisibleForTesting;
import java.io.BufferedOutputStream;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.FileDescriptor;
import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.BasicFileAttributeView;
import java.nio.file.attribute.FileTime;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.StringTokenizer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
/****************************************************************
* Implement the FileSystem API for the raw local filesystem.
*
*****************************************************************/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class RawLocalFileSystem extends FileSystem {
static final URI NAME = URI.create("file:///");
private Path workingDir;
// Temporary workaround for HADOOP-9652.
private static boolean useDeprecatedFileStatus = true;
@VisibleForTesting
public static void useStatIfAvailable() {
useDeprecatedFileStatus = !Stat.isAvailable();
}
public RawLocalFileSystem() {
workingDir = getInitialWorkingDirectory();
}
private Path makeAbsolute(Path f) {
if (f.isAbsolute()) {
return f;
} else {
return new Path(workingDir, f);
}
}
/** Convert a path to a File. */
public File pathToFile(Path path) {
checkPath(path);
if (!path.isAbsolute()) {
path = new Path(getWorkingDirectory(), path);
}
return new File(path.toUri().getPath());
}
@Override
public URI getUri() { return NAME; }
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
super.initialize(uri, conf);
setConf(conf);
}
/*******************************************************
* For open()'s FSInputStream.
*******************************************************/
class LocalFSFileInputStream extends FSInputStream implements HasFileDescriptor {
private FileInputStream fis;
private long position;
public LocalFSFileInputStream(Path f) throws IOException {
fis = new FileInputStream(pathToFile(f));
}
@Override
public void seek(long pos) throws IOException {
if (pos < 0) {
throw new EOFException(
FSExceptionMessages.NEGATIVE_SEEK);
}
fis.getChannel().position(pos);
this.position = pos;
}
@Override
public long getPos() throws IOException {
return this.position;
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
/*
* Just forward to the fis
*/
@Override
public int available() throws IOException { return fis.available(); }
@Override
public void close() throws IOException { fis.close(); }
@Override
public boolean markSupported() { return false; }
@Override
public int read() throws IOException {
try {
int value = fis.read();
if (value >= 0) {
this.position++;
statistics.incrementBytesRead(1);
}
return value;
} catch (IOException e) { // unexpected exception
throw new FSError(e); // assume native fs error
}
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
try {
int value = fis.read(b, off, len);
if (value > 0) {
this.position += value;
statistics.incrementBytesRead(value);
}
return value;
} catch (IOException e) { // unexpected exception
throw new FSError(e); // assume native fs error
}
}
@Override
public int read(long position, byte[] b, int off, int len)
throws IOException {
ByteBuffer bb = ByteBuffer.wrap(b, off, len);
try {
int value = fis.getChannel().read(bb, position);
if (value > 0) {
statistics.incrementBytesRead(value);
}
return value;
} catch (IOException e) {
throw new FSError(e);
}
}
@Override
public long skip(long n) throws IOException {
long value = fis.skip(n);
if (value > 0) {
this.position += value;
}
return value;
}
@Override
public FileDescriptor getFileDescriptor() throws IOException {
return fis.getFD();
}
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
if (!exists(f)) {
throw new FileNotFoundException(f.toString());
}
return new FSDataInputStream(new BufferedFSInputStream(
new LocalFSFileInputStream(f), bufferSize));
}
/*********************************************************
* For create()'s FSOutputStream.
*********************************************************/
class LocalFSFileOutputStream extends OutputStream {
private FileOutputStream fos;
private LocalFSFileOutputStream(Path f, boolean append,
FsPermission permission) throws IOException {
File file = pathToFile(f);
if (permission == null) {
this.fos = new FileOutputStream(file, append);
} else {
if (Shell.WINDOWS && NativeIO.isAvailable()) {
this.fos = NativeIO.Windows.createFileOutputStreamWithMode(file,
append, permission.toShort());
} else {
this.fos = new FileOutputStream(file, append);
boolean success = false;
try {
setPermission(f, permission);
success = true;
} finally {
if (!success) {
IOUtils.cleanup(LOG, this.fos);
}
}
}
}
}
/*
* Just forward to the fos
*/
@Override
public void close() throws IOException { fos.close(); }
@Override
public void flush() throws IOException { fos.flush(); }
@Override
public void write(byte[] b, int off, int len) throws IOException {
try {
fos.write(b, off, len);
} catch (IOException e) { // unexpected exception
throw new FSError(e); // assume native fs error
}
}
@Override
public void write(int b) throws IOException {
try {
fos.write(b);
} catch (IOException e) { // unexpected exception
throw new FSError(e); // assume native fs error
}
}
}
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
if (!exists(f)) {
throw new FileNotFoundException("File " + f + " not found");
}
FileStatus status = getFileStatus(f);
if (status.isDirectory()) {
      throw new IOException("Cannot append to a directory (=" + f + " )");
}
return new FSDataOutputStream(new BufferedOutputStream(
createOutputStreamWithMode(f, true, null), bufferSize), statistics,
status.getLen());
}
@Override
public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
short replication, long blockSize, Progressable progress)
throws IOException {
return create(f, overwrite, true, bufferSize, replication, blockSize,
progress, null);
}
private FSDataOutputStream create(Path f, boolean overwrite,
boolean createParent, int bufferSize, short replication, long blockSize,
Progressable progress, FsPermission permission) throws IOException {
if (exists(f) && !overwrite) {
throw new FileAlreadyExistsException("File already exists: " + f);
}
Path parent = f.getParent();
if (parent != null && !mkdirs(parent)) {
throw new IOException("Mkdirs failed to create " + parent.toString());
}
return new FSDataOutputStream(new BufferedOutputStream(
createOutputStreamWithMode(f, false, permission), bufferSize),
statistics);
}
protected OutputStream createOutputStream(Path f, boolean append)
throws IOException {
return createOutputStreamWithMode(f, append, null);
}
protected OutputStream createOutputStreamWithMode(Path f, boolean append,
FsPermission permission) throws IOException {
return new LocalFSFileOutputStream(f, append, permission);
}
@Override
@Deprecated
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
if (exists(f) && !flags.contains(CreateFlag.OVERWRITE)) {
throw new FileAlreadyExistsException("File already exists: " + f);
}
return new FSDataOutputStream(new BufferedOutputStream(
createOutputStreamWithMode(f, false, permission), bufferSize),
statistics);
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
FSDataOutputStream out = create(f, overwrite, true, bufferSize, replication,
blockSize, progress, permission);
return out;
}
@Override
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
boolean overwrite,
int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
FSDataOutputStream out = create(f, overwrite, false, bufferSize, replication,
blockSize, progress, permission);
return out;
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
// Attempt rename using Java API.
File srcFile = pathToFile(src);
File dstFile = pathToFile(dst);
if (srcFile.renameTo(dstFile)) {
return true;
}
// Else try POSIX style rename on Windows only
if (Shell.WINDOWS &&
handleEmptyDstDirectoryOnWindows(src, srcFile, dst, dstFile)) {
return true;
}
// The fallback behavior accomplishes the rename by a full copy.
if (LOG.isDebugEnabled()) {
LOG.debug("Falling through to a copy of " + src + " to " + dst);
}
return FileUtil.copy(this, src, this, dst, true, getConf());
}
@VisibleForTesting
public final boolean handleEmptyDstDirectoryOnWindows(Path src, File srcFile,
Path dst, File dstFile) throws IOException {
// Enforce POSIX rename behavior that a source directory replaces an
// existing destination if the destination is an empty directory. On most
// platforms, this is already handled by the Java API call above. Some
// platforms (notably Windows) do not provide this behavior, so the Java API
// call renameTo(dstFile) fails. Delete destination and attempt rename
// again.
if (this.exists(dst)) {
FileStatus sdst = this.getFileStatus(dst);
if (sdst.isDirectory() && dstFile.list().length == 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Deleting empty destination and renaming " + src + " to " +
dst);
}
if (this.delete(dst, false) && srcFile.renameTo(dstFile)) {
return true;
}
}
}
return false;
}
@Override
public boolean truncate(Path f, final long newLength) throws IOException {
FileStatus status = getFileStatus(f);
if(status == null) {
throw new FileNotFoundException("File " + f + " not found");
}
if(status.isDirectory()) {
throw new IOException("Cannot truncate a directory (=" + f + ")");
}
long oldLength = status.getLen();
if(newLength > oldLength) {
throw new IllegalArgumentException(
"Cannot truncate to a larger file size. Current size: " + oldLength +
", truncate size: " + newLength + ".");
}
try (FileOutputStream out = new FileOutputStream(pathToFile(f), true)) {
try {
out.getChannel().truncate(newLength);
} catch(IOException e) {
throw new FSError(e);
}
}
return true;
}
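  // Usage sketch (illustrative; the path and size are hypothetical): shrink
  // a local file to 1 MB. Truncating to a size larger than the current
  // length throws IllegalArgumentException, as enforced above.
  //
  //   fs.truncate(new Path("/tmp/app.log"), 1024 * 1024);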
/**
* Delete the given path to a file or directory.
* @param p the path to delete
* @param recursive to delete sub-directories
* @return true if the file or directory and all its contents were deleted
* @throws IOException if p is non-empty and recursive is false
*/
@Override
public boolean delete(Path p, boolean recursive) throws IOException {
File f = pathToFile(p);
if (!f.exists()) {
//no path, return false "nothing to delete"
return false;
}
if (f.isFile()) {
return f.delete();
} else if (!recursive && f.isDirectory() &&
(FileUtil.listFiles(f).length != 0)) {
throw new IOException("Directory " + f.toString() + " is not empty");
}
return FileUtil.fullyDelete(f);
}
@Override
public FileStatus[] listStatus(Path f) throws IOException {
File localf = pathToFile(f);
FileStatus[] results;
if (!localf.exists()) {
throw new FileNotFoundException("File " + f + " does not exist");
}
if (localf.isDirectory()) {
String[] names = localf.list();
if (names == null) {
return null;
}
results = new FileStatus[names.length];
int j = 0;
for (int i = 0; i < names.length; i++) {
try {
// Assemble the path using the Path 3 arg constructor to make sure
// paths with colon are properly resolved on Linux
results[j] = getFileStatus(new Path(f, new Path(null, null,
names[i])));
j++;
} catch (FileNotFoundException e) {
          // ignore the files not found since the dir list may have
          // changed since the names[] list was generated.
}
}
if (j == names.length) {
return results;
}
return Arrays.copyOf(results, j);
}
if (!useDeprecatedFileStatus) {
return new FileStatus[] { getFileStatus(f) };
}
return new FileStatus[] {
new DeprecatedRawLocalFileStatus(localf,
getDefaultBlockSize(f), this) };
}
protected boolean mkOneDir(File p2f) throws IOException {
return mkOneDirWithMode(new Path(p2f.getAbsolutePath()), p2f, null);
}
protected boolean mkOneDirWithMode(Path p, File p2f, FsPermission permission)
throws IOException {
if (permission == null) {
return p2f.mkdir();
} else {
if (Shell.WINDOWS && NativeIO.isAvailable()) {
try {
NativeIO.Windows.createDirectoryWithMode(p2f, permission.toShort());
return true;
} catch (IOException e) {
if (LOG.isDebugEnabled()) {
LOG.debug(String.format(
"NativeIO.createDirectoryWithMode error, path = %s, mode = %o",
p2f, permission.toShort()), e);
}
return false;
}
} else {
boolean b = p2f.mkdir();
if (b) {
setPermission(p, permission);
}
return b;
}
}
}
/**
* Creates the specified directory hierarchy. Does not
* treat existence as an error.
*/
@Override
public boolean mkdirs(Path f) throws IOException {
return mkdirsWithOptionalPermission(f, null);
}
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
return mkdirsWithOptionalPermission(f, permission);
}
private boolean mkdirsWithOptionalPermission(Path f, FsPermission permission)
throws IOException {
if(f == null) {
throw new IllegalArgumentException("mkdirs path arg is null");
}
Path parent = f.getParent();
File p2f = pathToFile(f);
File parent2f = null;
if(parent != null) {
parent2f = pathToFile(parent);
if(parent2f != null && parent2f.exists() && !parent2f.isDirectory()) {
throw new ParentNotDirectoryException("Parent path is not a directory: "
+ parent);
}
}
if (p2f.exists() && !p2f.isDirectory()) {
throw new FileNotFoundException("Destination exists" +
" and is not a directory: " + p2f.getCanonicalPath());
}
return (parent == null || parent2f.exists() || mkdirs(parent)) &&
(mkOneDirWithMode(f, p2f, permission) || p2f.isDirectory());
}
@Override
public Path getHomeDirectory() {
return this.makeQualified(new Path(System.getProperty("user.home")));
}
/**
* Set the working directory to the given directory.
*/
@Override
public void setWorkingDirectory(Path newDir) {
workingDir = makeAbsolute(newDir);
checkPath(workingDir);
}
@Override
public Path getWorkingDirectory() {
return workingDir;
}
@Override
protected Path getInitialWorkingDirectory() {
return this.makeQualified(new Path(System.getProperty("user.dir")));
}
@Override
public FsStatus getStatus(Path p) throws IOException {
File partition = pathToFile(p == null ? new Path("/") : p);
//File provides getUsableSpace() and getFreeSpace()
//File provides no API to obtain used space, assume used = total - free
return new FsStatus(partition.getTotalSpace(),
partition.getTotalSpace() - partition.getFreeSpace(),
partition.getFreeSpace());
}
// In the case of the local filesystem, we can just rename the file.
@Override
public void moveFromLocalFile(Path src, Path dst) throws IOException {
rename(src, dst);
}
// We can write output directly to the final location
@Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
return fsOutputFile;
}
// It's in the right place - nothing to do.
@Override
public void completeLocalOutput(Path fsWorkingFile, Path tmpLocalFile)
throws IOException {
}
@Override
public void close() throws IOException {
super.close();
}
@Override
public String toString() {
return "LocalFS";
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
return getFileLinkStatusInternal(f, true);
}
@Deprecated
private FileStatus deprecatedGetFileStatus(Path f) throws IOException {
File path = pathToFile(f);
if (path.exists()) {
return new DeprecatedRawLocalFileStatus(pathToFile(f),
getDefaultBlockSize(f), this);
} else {
throw new FileNotFoundException("File " + f + " does not exist");
}
}
@Deprecated
static class DeprecatedRawLocalFileStatus extends FileStatus {
    /* We can add extra fields here, but it breaks at least
     * CopyFiles.FilePair(). We recognize whether the information is already
     * loaded by checking if owner.equals("").
     */
private boolean isPermissionLoaded() {
return !super.getOwner().isEmpty();
}
DeprecatedRawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs)
throws IOException {
super(f.length(), f.isDirectory(), 1, defaultBlockSize,
f.lastModified(),
Files.readAttributes(f.toPath(),
BasicFileAttributes.class).lastAccessTime().toMillis(),
null, null, null,
new Path(f.getPath()).makeQualified(fs.getUri(),
fs.getWorkingDirectory()));
}
@Override
public FsPermission getPermission() {
if (!isPermissionLoaded()) {
loadPermissionInfo();
}
return super.getPermission();
}
@Override
public String getOwner() {
if (!isPermissionLoaded()) {
loadPermissionInfo();
}
return super.getOwner();
}
@Override
public String getGroup() {
if (!isPermissionLoaded()) {
loadPermissionInfo();
}
return super.getGroup();
}
    // loads permissions, owner, and group from `ls -ld`
private void loadPermissionInfo() {
IOException e = null;
try {
String output = FileUtil.execCommand(new File(getPath().toUri()),
Shell.getGetPermissionCommand());
StringTokenizer t =
new StringTokenizer(output, Shell.TOKEN_SEPARATOR_REGEX);
//expected format
//-rw------- 1 username groupname ...
String permission = t.nextToken();
if (permission.length() > FsPermission.MAX_PERMISSION_LENGTH) {
//files with ACLs might have a '+'
permission = permission.substring(0,
FsPermission.MAX_PERMISSION_LENGTH);
}
setPermission(FsPermission.valueOf(permission));
t.nextToken();
String owner = t.nextToken();
// If on windows domain, token format is DOMAIN\\user and we want to
// extract only the user name
if (Shell.WINDOWS) {
int i = owner.indexOf('\\');
if (i != -1)
owner = owner.substring(i + 1);
}
setOwner(owner);
setGroup(t.nextToken());
} catch (Shell.ExitCodeException ioe) {
if (ioe.getExitCode() != 1) {
e = ioe;
} else {
setPermission(null);
setOwner(null);
setGroup(null);
}
} catch (IOException ioe) {
e = ioe;
} finally {
if (e != null) {
throw new RuntimeException("Error while running command to get " +
"file permissions : " +
StringUtils.stringifyException(e));
}
}
}
@Override
public void write(DataOutput out) throws IOException {
if (!isPermissionLoaded()) {
loadPermissionInfo();
}
super.write(out);
}
}
/**
* Use the command chown to set owner.
*/
@Override
public void setOwner(Path p, String username, String groupname)
throws IOException {
FileUtil.setOwner(pathToFile(p), username, groupname);
}
/**
* Use the command chmod to set permission.
*/
@Override
public void setPermission(Path p, FsPermission permission)
throws IOException {
if (NativeIO.isAvailable()) {
NativeIO.POSIX.chmod(pathToFile(p).getCanonicalPath(),
permission.toShort());
} else {
String perm = String.format("%04o", permission.toShort());
Shell.execCommand(Shell.getSetPermissionCommand(perm, false,
FileUtil.makeShellPath(pathToFile(p), true)));
}
}
/**
* Sets the {@link Path}'s last modified time and last access time to
* the given valid times.
*
   * @param mtime the modification time to set (only if non-negative).
   * @param atime the access time to set (only if non-negative).
* @throws IOException if setting the times fails.
*/
@Override
public void setTimes(Path p, long mtime, long atime) throws IOException {
BasicFileAttributeView view = Files.getFileAttributeView(
pathToFile(p).toPath(), BasicFileAttributeView.class);
FileTime fmtime = (mtime >= 0) ? FileTime.fromMillis(mtime) : null;
FileTime fatime = (atime >= 0) ? FileTime.fromMillis(atime) : null;
view.setTimes(fmtime, fatime, null);
}
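  // Usage sketch (illustrative; the path is hypothetical): update only the
  // modification time and leave the access time untouched by passing a
  // negative atime.
  //
  //   fs.setTimes(new Path("/tmp/data"), System.currentTimeMillis(), -1);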
@Override
public boolean supportsSymlinks() {
return true;
}
@SuppressWarnings("deprecation")
@Override
public void createSymlink(Path target, Path link, boolean createParent)
throws IOException {
if (!FileSystem.areSymlinksEnabled()) {
throw new UnsupportedOperationException("Symlinks not supported");
}
final String targetScheme = target.toUri().getScheme();
if (targetScheme != null && !"file".equals(targetScheme)) {
throw new IOException("Unable to create symlink to non-local file "+
"system: "+target.toString());
}
if (createParent) {
mkdirs(link.getParent());
}
// NB: Use createSymbolicLink in java.nio.file.Path once available
int result = FileUtil.symLink(target.toString(),
makeAbsolute(link).toString());
if (result != 0) {
throw new IOException("Error " + result + " creating symlink " +
link + " to " + target);
}
}
/**
* Return a FileStatus representing the given path. If the path refers
* to a symlink return a FileStatus representing the link rather than
* the object the link refers to.
*/
@Override
public FileStatus getFileLinkStatus(final Path f) throws IOException {
FileStatus fi = getFileLinkStatusInternal(f, false);
// getFileLinkStatus is supposed to return a symlink with a
// qualified path
if (fi.isSymlink()) {
Path targetQual = FSLinkResolver.qualifySymlinkTarget(this.getUri(),
fi.getPath(), fi.getSymlink());
fi.setSymlink(targetQual);
}
return fi;
}
/**
* Public {@link FileStatus} methods delegate to this function, which in turn
* either call the new {@link Stat} based implementation or the deprecated
* methods based on platform support.
*
* @param f Path to stat
* @param dereference whether to dereference the final path component if a
* symlink
* @return FileStatus of f
* @throws IOException
*/
private FileStatus getFileLinkStatusInternal(final Path f,
boolean dereference) throws IOException {
if (!useDeprecatedFileStatus) {
return getNativeFileLinkStatus(f, dereference);
} else if (dereference) {
return deprecatedGetFileStatus(f);
} else {
return deprecatedGetFileLinkStatusInternal(f);
}
}
/**
* Deprecated. Remains for legacy support. Should be removed when {@link Stat}
* gains support for Windows and other operating systems.
*/
@Deprecated
private FileStatus deprecatedGetFileLinkStatusInternal(final Path f)
throws IOException {
String target = FileUtil.readLink(new File(f.toString()));
try {
FileStatus fs = getFileStatus(f);
// If f refers to a regular file or directory
if (target.isEmpty()) {
return fs;
}
// Otherwise f refers to a symlink
return new FileStatus(fs.getLen(),
false,
fs.getReplication(),
fs.getBlockSize(),
fs.getModificationTime(),
fs.getAccessTime(),
fs.getPermission(),
fs.getOwner(),
fs.getGroup(),
new Path(target),
f);
} catch (FileNotFoundException e) {
/* The exists method in the File class returns false for dangling
* links so we can get a FileNotFoundException for links that exist.
* It's also possible that we raced with a delete of the link. Use
* the readBasicFileAttributes method in java.nio.file.attributes
* when available.
*/
if (!target.isEmpty()) {
return new FileStatus(0, false, 0, 0, 0, 0, FsPermission.getDefault(),
"", "", new Path(target), f);
}
// f refers to a file or directory that does not exist
throw e;
}
}
/**
* Calls out to platform's native stat(1) implementation to get file metadata
* (permissions, user, group, atime, mtime, etc). This works around the lack
* of lstat(2) in Java 6.
*
   * Currently, the {@link Stat} class used to do this only supports Linux,
   * FreeBSD, and macOS, so the old
   * {@link #deprecatedGetFileLinkStatusInternal(Path)} implementation
   * (deprecated) remains until further OS support is added.
*
* @param f File to stat
* @param dereference whether to dereference symlinks
* @return FileStatus of f
* @throws IOException
*/
private FileStatus getNativeFileLinkStatus(final Path f,
boolean dereference) throws IOException {
checkPath(f);
Stat stat = new Stat(f, getDefaultBlockSize(f), dereference, this);
FileStatus status = stat.getFileStatus();
return status;
}
@Override
public Path getLinkTarget(Path f) throws IOException {
FileStatus fi = getFileLinkStatusInternal(f, false);
// return an unqualified symlink target
return fi.getSymlink();
}
}
| 29,320 | 30.974918 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.util.DataChecksum;
/****************************************************
* Provides server default configuration values to clients.
*
****************************************************/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class FsServerDefaults implements Writable {
static { // register a ctor
WritableFactories.setFactory(FsServerDefaults.class, new WritableFactory() {
@Override
public Writable newInstance() {
return new FsServerDefaults();
}
});
}
private long blockSize;
private int bytesPerChecksum;
private int writePacketSize;
private short replication;
private int fileBufferSize;
private boolean encryptDataTransfer;
private long trashInterval;
private DataChecksum.Type checksumType;
public FsServerDefaults() {
}
public FsServerDefaults(long blockSize, int bytesPerChecksum,
int writePacketSize, short replication, int fileBufferSize,
boolean encryptDataTransfer, long trashInterval,
DataChecksum.Type checksumType) {
this.blockSize = blockSize;
this.bytesPerChecksum = bytesPerChecksum;
this.writePacketSize = writePacketSize;
this.replication = replication;
this.fileBufferSize = fileBufferSize;
this.encryptDataTransfer = encryptDataTransfer;
this.trashInterval = trashInterval;
this.checksumType = checksumType;
}
public long getBlockSize() {
return blockSize;
}
public int getBytesPerChecksum() {
return bytesPerChecksum;
}
public int getWritePacketSize() {
return writePacketSize;
}
public short getReplication() {
return replication;
}
public int getFileBufferSize() {
return fileBufferSize;
}
public boolean getEncryptDataTransfer() {
return encryptDataTransfer;
}
public long getTrashInterval() {
return trashInterval;
}
public DataChecksum.Type getChecksumType() {
return checksumType;
}
// /////////////////////////////////////////
// Writable
// /////////////////////////////////////////
@Override
@InterfaceAudience.Private
public void write(DataOutput out) throws IOException {
out.writeLong(blockSize);
out.writeInt(bytesPerChecksum);
out.writeInt(writePacketSize);
out.writeShort(replication);
out.writeInt(fileBufferSize);
WritableUtils.writeEnum(out, checksumType);
}
@Override
@InterfaceAudience.Private
public void readFields(DataInput in) throws IOException {
blockSize = in.readLong();
bytesPerChecksum = in.readInt();
writePacketSize = in.readInt();
replication = in.readShort();
fileBufferSize = in.readInt();
checksumType = WritableUtils.readEnum(in, DataChecksum.Type.class);
}
}
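// Usage sketch (illustrative; the path is hypothetical): clients normally
// obtain these defaults from a FileSystem rather than constructing them.
//
//   FsServerDefaults defaults = fs.getServerDefaults(new Path("/"));
//   long blockSize = defaults.getBlockSize();
//   long trashInterval = defaults.getTrashInterval();
//
// Note that write()/readFields() above carry only a subset of the fields:
// encryptDataTransfer and trashInterval are not part of this Writable
// wire format.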
| 3,951 | 28.939394 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.StringTokenizer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Shell;
import com.google.common.annotations.VisibleForTesting;
/**
 * Wrapper for the Unix stat(1) command. Used to work around the lack of
* lstat(2) in Java 6.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class Stat extends Shell {
private final Path original;
private final Path qualified;
private final Path path;
private final long blockSize;
private final boolean dereference;
private FileStatus stat;
public Stat(Path path, long blockSize, boolean deref, FileSystem fs)
throws IOException {
super(0L, true);
// Original path
this.original = path;
// Qualify the original and strip out URI fragment via toUri().getPath()
Path stripped = new Path(
original.makeQualified(fs.getUri(), fs.getWorkingDirectory())
.toUri().getPath());
// Re-qualify the bare stripped path and store it
this.qualified =
stripped.makeQualified(fs.getUri(), fs.getWorkingDirectory());
// Strip back down to a plain path
this.path = new Path(qualified.toUri().getPath());
this.blockSize = blockSize;
this.dereference = deref;
// LANG = C setting
Map<String, String> env = new HashMap<String, String>();
env.put("LANG", "C");
setEnvironment(env);
}
public FileStatus getFileStatus() throws IOException {
run();
return stat;
}
  /**
   * Whether Stat is supported on the current platform.
   * @return true if the stat(1) command can be used on this platform
   */
  public static boolean isAvailable() {
    return Shell.LINUX || Shell.FREEBSD || Shell.MAC;
  }
@VisibleForTesting
FileStatus getFileStatusForTesting() {
return stat;
}
@Override
protected String[] getExecString() {
String derefFlag = "-";
if (dereference) {
derefFlag = "-L";
}
if (Shell.LINUX) {
return new String[] {
"stat", derefFlag + "c", "%s,%F,%Y,%X,%a,%U,%G,%N", path.toString() };
} else if (Shell.FREEBSD || Shell.MAC) {
return new String[] {
"stat", derefFlag + "f", "%z,%HT,%m,%a,%Op,%Su,%Sg,`link' -> `%Y'",
path.toString() };
} else {
throw new UnsupportedOperationException(
"stat is not supported on this platform");
}
}
@Override
protected void parseExecResult(BufferedReader lines) throws IOException {
// Reset stat
stat = null;
String line = lines.readLine();
if (line == null) {
throw new IOException("Unable to stat path: " + original);
}
if (line.endsWith("No such file or directory") ||
line.endsWith("Not a directory")) {
throw new FileNotFoundException("File " + original + " does not exist");
}
if (line.endsWith("Too many levels of symbolic links")) {
throw new IOException("Possible cyclic loop while following symbolic" +
" link " + original);
}
// 6,symbolic link,6,1373584236,1373584236,lrwxrwxrwx,andrew,andrew,`link' -> `target'
// OR
// 6,symbolic link,6,1373584236,1373584236,lrwxrwxrwx,andrew,andrew,'link' -> 'target'
StringTokenizer tokens = new StringTokenizer(line, ",");
try {
long length = Long.parseLong(tokens.nextToken());
      boolean isDir = tokens.nextToken().equalsIgnoreCase("directory");
// Convert from seconds to milliseconds
long modTime = Long.parseLong(tokens.nextToken())*1000;
long accessTime = Long.parseLong(tokens.nextToken())*1000;
String octalPerms = tokens.nextToken();
// FreeBSD has extra digits beyond 4, truncate them
if (octalPerms.length() > 4) {
int len = octalPerms.length();
octalPerms = octalPerms.substring(len-4, len);
}
FsPermission perms = new FsPermission(Short.parseShort(octalPerms, 8));
String owner = tokens.nextToken();
String group = tokens.nextToken();
String symStr = tokens.nextToken();
// 'notalink'
// `link' -> `target' OR 'link' -> 'target'
// '' -> ''
Path symlink = null;
String parts[] = symStr.split(" -> ");
try {
String target = parts[1];
target = target.substring(1, target.length()-1);
if (!target.isEmpty()) {
symlink = new Path(target);
}
} catch (ArrayIndexOutOfBoundsException e) {
// null if not a symlink
}
// Set stat
stat = new FileStatus(length, isDir, 1, blockSize, modTime, accessTime,
perms, owner, group, symlink, qualified);
} catch (NumberFormatException e) {
throw new IOException("Unexpected stat output: " + line, e);
} catch (NoSuchElementException e) {
throw new IOException("Unexpected stat output: " + line, e);
}
}
}
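// Usage sketch (illustrative; the path, block size, and FileSystem "fs" are
// hypothetical):
//
//   if (Stat.isAvailable()) {
//     Stat stat = new Stat(new Path("/tmp/file"), 4096L, false, fs);
//     FileStatus status = stat.getFileStatus();
//   }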
| 5,985 | 33.205714 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemLinkResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* FileSystem-specific class used to operate on and resolve symlinks in a path.
* Operation can potentially span multiple {@link FileSystem}s.
*
* @see FSLinkResolver
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class FileSystemLinkResolver<T> {
/**
* FileSystem subclass-specific implementation of superclass method.
* Overridden on instantiation to perform the actual method call, which throws
* an UnresolvedLinkException if called on an unresolved {@link Path}.
* @param p Path on which to perform an operation
* @return Generic type returned by operation
* @throws IOException
* @throws UnresolvedLinkException
*/
abstract public T doCall(final Path p) throws IOException,
UnresolvedLinkException;
/**
* Calls the abstract FileSystem call equivalent to the specialized subclass
* implementation in {@link #doCall(Path)}. This is used when retrying the
* call with a newly resolved Path and corresponding new FileSystem.
*
* @param fs
* FileSystem with which to retry call
* @param p
   *          resolved target of the path
* @return Generic type determined by implementation
* @throws IOException
*/
abstract public T next(final FileSystem fs, final Path p) throws IOException;
/**
* Attempt calling overridden {@link #doCall(Path)} method with
* specified {@link FileSystem} and {@link Path}. If the call fails with an
* UnresolvedLinkException, it will try to resolve the path and retry the call
* by calling {@link #next(FileSystem, Path)}.
* @param filesys FileSystem with which to try call
* @param path Path with which to try call
* @return Generic type determined by implementation
* @throws IOException
*/
public T resolve(final FileSystem filesys, final Path path)
throws IOException {
int count = 0;
T in = null;
Path p = path;
// Assumes path belongs to this FileSystem.
// Callers validate this by passing paths through FileSystem#checkPath
FileSystem fs = filesys;
for (boolean isLink = true; isLink;) {
try {
in = doCall(p);
isLink = false;
} catch (UnresolvedLinkException e) {
if (!filesys.resolveSymlinks) {
throw new IOException("Path " + path + " contains a symlink"
+ " and symlink resolution is disabled ("
+ CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY
+ ").", e);
}
if (!FileSystem.areSymlinksEnabled()) {
throw new IOException("Symlink resolution is disabled in" +
" this version of Hadoop.");
}
if (count++ > FsConstants.MAX_PATH_LINKS) {
throw new IOException("Possible cyclic loop while " +
"following symbolic link " + path);
}
// Resolve the first unresolved path component
p = FSLinkResolver.qualifySymlinkTarget(fs.getUri(), p,
filesys.resolveLink(p));
fs = FileSystem.getFSofPath(p, filesys.getConf());
// Have to call next if it's a new FS
if (!fs.equals(filesys)) {
return next(fs, p);
}
// Else, we keep resolving with this filesystem
}
}
// Successful call, path was fully resolved
return in;
}
}
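// Usage sketch (illustrative; "myfs", "path", "bufferSize" and the
// openInternal() helper are assumptions, not part of this file): a
// FileSystem method typically wraps its own logic in an anonymous resolver
// so that symlinks crossing file systems are chased transparently.
//
//   FSDataInputStream in = new FileSystemLinkResolver<FSDataInputStream>() {
//     @Override
//     public FSDataInputStream doCall(final Path p) throws IOException {
//       return myfs.openInternal(p, bufferSize); // hypothetical helper
//     }
//     @Override
//     public FSDataInputStream next(final FileSystem fs, final Path p)
//         throws IOException {
//       return fs.open(p, bufferSize);
//     }
//   }.resolve(myfs, path);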
| 4,317 | 37.212389 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.Random;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/****************************************************************
 * Implement the FileSystem API for the checksummed local filesystem.
*
*****************************************************************/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class LocalFileSystem extends ChecksumFileSystem {
static final URI NAME = URI.create("file:///");
static private Random rand = new Random();
public LocalFileSystem() {
this(new RawLocalFileSystem());
}
@Override
public void initialize(URI name, Configuration conf) throws IOException {
if (fs.getConf() == null) {
fs.initialize(name, conf);
}
String scheme = name.getScheme();
if (!scheme.equals(fs.getUri().getScheme())) {
swapScheme = scheme;
}
}
/**
* Return the protocol scheme for the FileSystem.
* <p/>
*
* @return <code>file</code>
*/
@Override
public String getScheme() {
return "file";
}
public FileSystem getRaw() {
return getRawFileSystem();
}
public LocalFileSystem(FileSystem rawLocalFileSystem) {
super(rawLocalFileSystem);
}
/** Convert a path to a File. */
public File pathToFile(Path path) {
return ((RawLocalFileSystem)fs).pathToFile(path);
}
@Override
public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
FileUtil.copy(this, src, this, dst, delSrc, getConf());
}
@Override
public void copyToLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
FileUtil.copy(this, src, this, dst, delSrc, getConf());
}
/**
* Moves files to a bad file directory on the same device, so that their
* storage will not be reused.
*/
@Override
public boolean reportChecksumFailure(Path p, FSDataInputStream in,
long inPos,
FSDataInputStream sums, long sumsPos) {
try {
// canonicalize f
File f = ((RawLocalFileSystem)fs).pathToFile(p).getCanonicalFile();
// find highest writable parent dir of f on the same device
String device = new DF(f, getConf()).getMount();
File parent = f.getParentFile();
File dir = null;
while (parent != null && FileUtil.canWrite(parent) &&
parent.toString().startsWith(device)) {
dir = parent;
parent = parent.getParentFile();
}
if (dir==null) {
throw new IOException(
"not able to find the highest writable parent dir");
}
// move the file there
File badDir = new File(dir, "bad_files");
if (!badDir.mkdirs()) {
if (!badDir.isDirectory()) {
throw new IOException("Mkdirs failed to create " + badDir.toString());
}
}
String suffix = "." + rand.nextInt();
File badFile = new File(badDir, f.getName()+suffix);
LOG.warn("Moving bad file " + f + " to " + badFile);
in.close(); // close it first
boolean b = f.renameTo(badFile); // rename it
if (!b) {
LOG.warn("Ignoring failure of renameTo");
}
// move checksum file too
File checkFile = ((RawLocalFileSystem)fs).pathToFile(getChecksumFile(p));
// close the stream before rename to release the file handle
sums.close();
b = checkFile.renameTo(new File(badDir, checkFile.getName()+suffix));
if (!b) {
LOG.warn("Ignoring failure of renameTo");
}
} catch (IOException e) {
LOG.warn("Error moving bad file " + p + ": " + e);
}
return false;
}
@Override
public boolean supportsSymlinks() {
return true;
}
@Override
public void createSymlink(Path target, Path link, boolean createParent)
throws IOException {
fs.createSymlink(target, link, createParent);
}
@Override
public FileStatus getFileLinkStatus(final Path f) throws IOException {
return fs.getFileLinkStatus(f);
}
@Override
public Path getLinkTarget(Path f) throws IOException {
return fs.getLinkTarget(f);
}
}
| 5,213 | 30.035714 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.Checksum;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
import java.nio.ByteBuffer;
import java.nio.IntBuffer;
/**
 * This is a generic input stream for verifying checksums of
* data before it is read by a user.
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Unstable
abstract public class FSInputChecker extends FSInputStream {
public static final Log LOG
= LogFactory.getLog(FSInputChecker.class);
  /** The file name from which data is read */
protected Path file;
private Checksum sum;
private boolean verifyChecksum = true;
private int maxChunkSize; // data bytes for checksum (eg 512)
private byte[] buf; // buffer for non-chunk-aligned reading
private byte[] checksum;
private IntBuffer checksumInts; // wrapper on checksum buffer
private int pos; // the position of the reader inside buf
private int count; // the number of bytes currently in buf
private int numOfRetries;
// cached file position
// this should always be a multiple of maxChunkSize
private long chunkPos = 0;
// Number of checksum chunks that can be read at once into a user
// buffer. Chosen by benchmarks - higher values do not reduce
// CPU usage. The size of the data reads made to the underlying stream
// will be CHUNKS_PER_READ * maxChunkSize.
private static final int CHUNKS_PER_READ = 32;
protected static final int CHECKSUM_SIZE = 4; // 32-bit checksum
/** Constructor
*
* @param file The name of the file to be read
* @param numOfRetries Number of read retries when ChecksumError occurs
*/
protected FSInputChecker( Path file, int numOfRetries) {
this.file = file;
this.numOfRetries = numOfRetries;
}
/** Constructor
*
* @param file The name of the file to be read
* @param numOfRetries Number of read retries when ChecksumError occurs
* @param sum the type of Checksum engine
   * @param chunkSize maximum chunk size
   * @param checksumSize the number of bytes in each checksum
*/
protected FSInputChecker( Path file, int numOfRetries,
boolean verifyChecksum, Checksum sum, int chunkSize, int checksumSize ) {
this(file, numOfRetries);
set(verifyChecksum, sum, chunkSize, checksumSize);
}
/**
* Reads in checksum chunks into <code>buf</code> at <code>offset</code>
* and checksum into <code>checksum</code>.
* Since checksums can be disabled, there are two cases implementors need
* to worry about:
*
* (a) needChecksum() will return false:
* - len can be any positive value
* - checksum will be null
* Implementors should simply pass through to the underlying data stream.
* or
* (b) needChecksum() will return true:
* - len >= maxChunkSize
* - checksum.length is a multiple of CHECKSUM_SIZE
* Implementors should read an integer number of data chunks into
* buf. The amount read should be bounded by len or by
* checksum.length / CHECKSUM_SIZE * maxChunkSize. Note that len may
* be a value that is not a multiple of maxChunkSize, in which case
* the implementation may return less than len.
*
* The method is used for implementing read, therefore, it should be optimized
* for sequential reading.
*
* @param pos chunkPos
   * @param buf destination buffer
* @param offset offset in buf at which to store data
* @param len maximum number of bytes to read
* @param checksum the data buffer into which to write checksums
* @return number of bytes read
*/
abstract protected int readChunk(long pos, byte[] buf, int offset, int len,
byte[] checksum) throws IOException;
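  // Illustrative sketch (an assumption, not from this file; "datain" is a
  // hypothetical underlying stream): the minimal shape of case (a) above,
  // where needChecksum() is false and bytes pass straight through.
  //
  //   protected int readChunk(long pos, byte[] buf, int offset, int len,
  //       byte[] checksum) throws IOException {
  //     if (checksum == null) {            // checksums disabled
  //       return datain.read(buf, offset, len);
  //     }
  //     ...                                // case (b): chunk-aligned reads
  //   }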
/** Return position of beginning of chunk containing pos.
*
   * @param pos a position in the file
* @return the starting position of the chunk which contains the byte
*/
abstract protected long getChunkPosition(long pos);
/** Return true if there is a need for checksum verification */
protected synchronized boolean needChecksum() {
return verifyChecksum && sum != null;
}
/**
* Read one checksum-verified byte
*
* @return the next byte of data, or <code>-1</code> if the end of the
* stream is reached.
* @exception IOException if an I/O error occurs.
*/
@Override
public synchronized int read() throws IOException {
if (pos >= count) {
fill();
if (pos >= count) {
return -1;
}
}
return buf[pos++] & 0xff;
}
/**
* Read checksum verified bytes from this byte-input stream into
* the specified byte array, starting at the given offset.
*
* <p> This method implements the general contract of the corresponding
* <code>{@link InputStream#read(byte[], int, int) read}</code> method of
* the <code>{@link InputStream}</code> class. As an additional
* convenience, it attempts to read as many bytes as possible by repeatedly
* invoking the <code>read</code> method of the underlying stream. This
* iterated <code>read</code> continues until one of the following
* conditions becomes true: <ul>
*
* <li> The specified number of bytes have been read,
*
* <li> The <code>read</code> method of the underlying stream returns
* <code>-1</code>, indicating end-of-file.
*
* </ul> If the first <code>read</code> on the underlying stream returns
* <code>-1</code> to indicate end-of-file then this method returns
* <code>-1</code>. Otherwise this method returns the number of bytes
* actually read.
*
* @param b destination buffer.
* @param off offset at which to start storing bytes.
* @param len maximum number of bytes to read.
* @return the number of bytes read, or <code>-1</code> if the end of
* the stream has been reached.
* @exception IOException if an I/O error occurs.
* ChecksumException if any checksum error occurs
*/
@Override
public synchronized int read(byte[] b, int off, int len) throws IOException {
// parameter check
if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return 0;
}
int n = 0;
for (;;) {
int nread = read1(b, off + n, len - n);
if (nread <= 0)
return (n == 0) ? nread : n;
n += nread;
if (n >= len)
return n;
}
}
/**
   * Fills the buffer with one chunk of data.
   * No mark is supported.
   * This method assumes that all data in the buffer has already been read in,
   * hence pos >= count.
*/
private void fill( ) throws IOException {
assert(pos>=count);
// fill internal buffer
count = readChecksumChunk(buf, 0, maxChunkSize);
if (count < 0) count = 0;
}
/**
   * Like read(byte[], int, int), but no destination buffer is provided,
   * so the data read is discarded.
* @param len maximum number of bytes to read.
* @return the number of bytes read.
* @throws IOException if an I/O error occurs.
*/
final protected synchronized int readAndDiscard(int len) throws IOException {
int total = 0;
while (total < len) {
if (pos >= count) {
count = readChecksumChunk(buf, 0, maxChunkSize);
if (count <= 0) {
break;
}
}
int rd = Math.min(count - pos, len - total);
pos += rd;
total += rd;
}
return total;
}
/*
* Read characters into a portion of an array, reading from the underlying
* stream at most once if necessary.
*/
private int read1(byte b[], int off, int len)
throws IOException {
int avail = count-pos;
if( avail <= 0 ) {
if(len >= maxChunkSize) {
// read a chunk to user buffer directly; avoid one copy
int nread = readChecksumChunk(b, off, len);
return nread;
} else {
// read a chunk into the local buffer
fill();
if( count <= 0 ) {
return -1;
} else {
avail = count;
}
}
}
// copy content of the local buffer to the user buffer
int cnt = (avail < len) ? avail : len;
System.arraycopy(buf, pos, b, off, cnt);
pos += cnt;
return cnt;
}
  /* Read one or more checksum chunks into array <i>b</i> at position <i>off</i>.
   * There must be at least one checksum chunk boundary
   * in between <cur_pos, cur_pos+len>;
   * otherwise an IllegalArgumentException is thrown.
   * Reading stops at the last chunk boundary or at the end of the stream.
   * This makes sure that all data read is checksum verified.
*
* @param b the buffer into which the data is read.
* @param off the start offset in array <code>b</code>
* at which the data is written.
* @param len the maximum number of bytes to read.
* @return the total number of bytes read into the buffer, or
* <code>-1</code> if there is no more data because the end of
* the stream has been reached.
* @throws IOException if an I/O error occurs.
*/
private int readChecksumChunk(byte b[], final int off, final int len)
throws IOException {
// invalidate buffer
count = pos = 0;
int read = 0;
boolean retry = true;
int retriesLeft = numOfRetries;
do {
retriesLeft--;
try {
read = readChunk(chunkPos, b, off, len, checksum);
if( read > 0) {
if( needChecksum() ) {
verifySums(b, off, read);
}
chunkPos += read;
}
retry = false;
} catch (ChecksumException ce) {
LOG.info("Found checksum error: b[" + off + ", " + (off+read) + "]="
+ StringUtils.byteToHexString(b, off, off + read), ce);
if (retriesLeft == 0) {
throw ce;
}
// try a new replica
if (seekToNewSource(chunkPos)) {
// Since at least one of the sources is different,
// the read might succeed, so we'll retry.
seek(chunkPos);
} else {
          // Neither the data stream nor the checksum stream is being read
          // from a different source, meaning we'd still get a checksum error
          // if we tried the read again. We throw an exception instead.
throw ce;
}
}
} while (retry);
return read;
}
private void verifySums(final byte b[], final int off, int read)
throws ChecksumException
{
int leftToVerify = read;
int verifyOff = 0;
checksumInts.rewind();
checksumInts.limit((read - 1)/maxChunkSize + 1);
while (leftToVerify > 0) {
sum.update(b, off + verifyOff, Math.min(leftToVerify, maxChunkSize));
int expected = checksumInts.get();
int calculated = (int)sum.getValue();
sum.reset();
if (expected != calculated) {
long errPos = chunkPos + verifyOff;
throw new ChecksumException(
"Checksum error: "+file+" at "+ errPos +
" exp: " + expected + " got: " + calculated, errPos);
}
leftToVerify -= maxChunkSize;
verifyOff += maxChunkSize;
}
}
/**
   * Convert a checksum byte array to a long.
   * Deprecated since 0.22 because it is no longer used
   * by this class.
*/
@Deprecated
static public long checksum2long(byte[] checksum) {
long crc = 0L;
for(int i=0; i<checksum.length; i++) {
crc |= (0xffL&(long)checksum[i])<<((checksum.length-i-1)*8);
}
return crc;
}
@Override
public synchronized long getPos() throws IOException {
return chunkPos-Math.max(0L, count - pos);
}
@Override
public synchronized int available() throws IOException {
return Math.max(0, count - pos);
}
/**
* Skips over and discards <code>n</code> bytes of data from the
* input stream.
*
* <p>This method may skip more bytes than are remaining in the backing
* file. This produces no exception and the number of bytes skipped
* may include some number of bytes that were beyond the EOF of the
* backing file. Attempting to read from the stream after skipping past
* the end will result in -1 indicating the end of the file.
*
*<p>If <code>n</code> is negative, no bytes are skipped.
*
* @param n the number of bytes to be skipped.
* @return the actual number of bytes skipped.
* @exception IOException if an I/O error occurs.
* ChecksumException if the chunk to skip to is corrupted
*/
@Override
public synchronized long skip(long n) throws IOException {
if (n <= 0) {
return 0;
}
seek(getPos()+n);
return n;
}
/**
* Seek to the given position in the stream.
* The next read() will be from that position.
*
* <p>This method may seek past the end of the file.
* This produces no exception and an attempt to read from
* the stream will result in -1 indicating the end of the file.
*
   * @param pos the position to seek to.
* @exception IOException if an I/O error occurs.
* ChecksumException if the chunk to seek to is corrupted
*/
@Override
public synchronized void seek(long pos) throws IOException {
if( pos < 0 ) {
throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
}
// optimize: check if the pos is in the buffer
long start = chunkPos - this.count;
if( pos>=start && pos<chunkPos) {
this.pos = (int)(pos-start);
return;
}
// reset the current state
resetState();
// seek to a checksum boundary
chunkPos = getChunkPosition(pos);
// scan to the desired position
int delta = (int)(pos - chunkPos);
if( delta > 0) {
readFully(this, new byte[delta], 0, delta);
}
}
/**
* A utility function that tries to read up to <code>len</code> bytes from
* <code>stm</code>
*
* @param stm an input stream
   * @param buf destination buffer
* @param offset offset at which to store data
* @param len number of bytes to read
* @return actual number of bytes read
* @throws IOException if there is any IO error
*/
protected static int readFully(InputStream stm,
byte[] buf, int offset, int len) throws IOException {
int n = 0;
for (;;) {
int nread = stm.read(buf, offset + n, len - n);
if (nread <= 0)
return (n == 0) ? nread : n;
n += nread;
if (n >= len)
return n;
}
}
/**
* Set the checksum related parameters
* @param verifyChecksum whether to verify checksum
* @param sum which type of checksum to use
   * @param maxChunkSize maximum chunk size
* @param checksumSize checksum size
*/
final protected synchronized void set(boolean verifyChecksum,
Checksum sum, int maxChunkSize, int checksumSize) {
    // The code assumes that checksums are always 32-bit.
assert !verifyChecksum || sum == null || checksumSize == CHECKSUM_SIZE;
this.maxChunkSize = maxChunkSize;
this.verifyChecksum = verifyChecksum;
this.sum = sum;
this.buf = new byte[maxChunkSize];
// The size of the checksum array here determines how much we can
// read in a single call to readChunk
this.checksum = new byte[CHUNKS_PER_READ * checksumSize];
this.checksumInts = ByteBuffer.wrap(checksum).asIntBuffer();
this.count = 0;
this.pos = 0;
}
@Override
final public boolean markSupported() {
return false;
}
@Override
final public void mark(int readlimit) {
}
@Override
final public void reset() throws IOException {
throw new IOException("mark/reset not supported");
}
/* reset this FSInputChecker's state */
private void resetState() {
// invalidate buffer
count = 0;
pos = 0;
// reset Checksum
if (sum != null) {
sum.reset();
}
}
}
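// ---------------------------------------------------------------------------
// Hedged example (illustrative sketch, not part of the original source):
// a minimal FSInputChecker subclass showing the readChunk()/getChunkPosition()
// contract documented above. The 512-byte chunk size, the CRC32 sums and the
// in-memory backing arrays are assumptions made purely for illustration; real
// implementations read data and checksums from separate streams.
class ExampleInputChecker extends FSInputChecker {
  private final byte[] data; // the pretend "file" contents
  private final byte[] sums; // one 4-byte CRC32 per 512-byte chunk of data

  ExampleInputChecker(Path name, byte[] data, byte[] sums) {
    // verifyChecksum=true, CRC32 sums, 512-byte chunks, 4-byte checksums
    super(name, 1, true, new java.util.zip.CRC32(), 512, 4);
    this.data = data;
    this.sums = sums;
  }

  @Override
  protected long getChunkPosition(long pos) {
    return pos / 512 * 512; // chunks start on 512-byte boundaries
  }

  @Override
  public boolean seekToNewSource(long targetPos) throws IOException {
    return false; // a single in-memory source: there are no replicas
  }

  @Override
  protected int readChunk(long pos, byte[] buf, int offset, int len,
      byte[] checksum) throws IOException {
    if (pos >= data.length) {
      return -1; // EOF
    }
    // Return exactly one chunk: an integral number of chunks is required
    // when checksums are enabled (see the readChunk() javadoc above).
    int n = (int) Math.min(512, data.length - pos);
    System.arraycopy(data, (int) pos, buf, offset, n);
    System.arraycopy(sums, (int) (pos / 512) * 4, checksum, 0, 4);
    return n;
  }
}
// ---------------------------------------------------------------------------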
| 17,022 | 31.799615 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.Shell;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
/** Filesystem disk space usage statistics. Uses the unix 'du' program. */
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class DU extends Shell {
private String dirPath;
private AtomicLong used = new AtomicLong();
private volatile boolean shouldRun = true;
private Thread refreshUsed;
private IOException duException = null;
private long refreshInterval;
/**
* Keeps track of disk usage.
* @param path the path to check disk usage in
* @param interval refresh the disk usage at this interval
* @throws IOException if we fail to refresh the disk usage
*/
public DU(File path, long interval) throws IOException {
this(path, interval, -1L);
}
/**
* Keeps track of disk usage.
* @param path the path to check disk usage in
* @param interval refresh the disk usage at this interval
* @param initialUsed use this value until next refresh
* @throws IOException if we fail to refresh the disk usage
*/
public DU(File path, long interval, long initialUsed) throws IOException {
super(0);
//we set the Shell interval to 0 so it will always run our command
//and use this one to set the thread sleep interval
this.refreshInterval = interval;
this.dirPath = path.getCanonicalPath();
//populate the used variable if the initial value is not specified.
if (initialUsed < 0) {
run();
} else {
this.used.set(initialUsed);
}
}
/**
* Keeps track of disk usage.
* @param path the path to check disk usage in
* @param conf configuration object
* @throws IOException if we fail to refresh the disk usage
*/
public DU(File path, Configuration conf) throws IOException {
this(path, conf, -1L);
}
/**
* Keeps track of disk usage.
* @param path the path to check disk usage in
* @param conf configuration object
* @param initialUsed use it until the next refresh.
* @throws IOException if we fail to refresh the disk usage
*/
public DU(File path, Configuration conf, long initialUsed)
throws IOException {
this(path, conf.getLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY,
CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT), initialUsed);
}
/**
* This thread refreshes the "used" variable.
*
   * A future improvement could be to not run this thread permanently;
   * instead, run it only when getUsed is called.
**/
class DURefreshThread implements Runnable {
@Override
public void run() {
while(shouldRun) {
try {
Thread.sleep(refreshInterval);
try {
//update the used variable
DU.this.run();
} catch (IOException e) {
synchronized (DU.this) {
//save the latest exception so we can return it in getUsed()
duException = e;
}
LOG.warn("Could not get disk usage information", e);
}
        } catch (InterruptedException e) {
          // ignore; the loop re-checks shouldRun
        }
}
}
}
/**
* Decrease how much disk space we use.
* @param value decrease by this value
*/
public void decDfsUsed(long value) {
used.addAndGet(-value);
}
/**
* Increase how much disk space we use.
* @param value increase by this value
*/
public void incDfsUsed(long value) {
used.addAndGet(value);
}
/**
* @return disk space used
* @throws IOException if the shell command fails
*/
public long getUsed() throws IOException {
//if the updating thread isn't started, update on demand
if(refreshUsed == null) {
run();
} else {
synchronized (DU.this) {
//if an exception was thrown in the last run, rethrow
if(duException != null) {
IOException tmp = duException;
duException = null;
throw tmp;
}
}
}
return Math.max(used.longValue(), 0L);
}
/**
* @return the path of which we're keeping track of disk usage
*/
public String getDirPath() {
return dirPath;
}
/**
   * Override to hook in the DUHelper class. Maybe this can also be used
   * more generally on Unix/Linux-based systems.
*/
@Override
protected void run() throws IOException {
if (WINDOWS) {
used.set(DUHelper.getFolderUsage(dirPath));
return;
}
super.run();
}
/**
* Start the disk usage checking thread.
*/
public void start() {
//only start the thread if the interval is sane
if(refreshInterval > 0) {
refreshUsed = new Thread(new DURefreshThread(),
"refreshUsed-"+dirPath);
refreshUsed.setDaemon(true);
refreshUsed.start();
}
}
/**
* Shut down the refreshing thread.
*/
public void shutdown() {
this.shouldRun = false;
if(this.refreshUsed != null) {
this.refreshUsed.interrupt();
}
}
@Override
public String toString() {
return
"du -sk " + dirPath +"\n" +
used + "\t" + dirPath;
}
@Override
protected String[] getExecString() {
return new String[] {"du", "-sk", dirPath};
}
@Override
protected void parseExecResult(BufferedReader lines) throws IOException {
String line = lines.readLine();
if (line == null) {
throw new IOException("Expecting a line not the end of stream");
}
String[] tokens = line.split("\t");
if(tokens.length == 0) {
throw new IOException("Illegal du output");
}
this.used.set(Long.parseLong(tokens[0])*1024);
}
public static void main(String[] args) throws Exception {
String path = ".";
if (args.length > 0) {
path = args[0];
}
System.out.println(new DU(new File(path), new Configuration()).toString());
}
}
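// Hedged usage sketch (not part of the original source): the typical
// lifecycle of a DU instance. The path and the 10-minute refresh interval
// are illustrative assumptions.
class DUUsageSketch {
  public static void main(String[] args) throws Exception {
    DU du = new DU(new File("/tmp/data"), 600000L); // runs 'du' once up front
    du.start();                // spawns the refresh thread (interval > 0)
    long bytes = du.getUsed(); // rethrows any IOException from the last run
    System.out.println(du.getDirPath() + " uses " + bytes + " bytes");
    du.shutdown();             // stops and interrupts the refresh thread
  }
}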
| 6,991 | 26.856574 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
|
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.fs.ByteBufferUtil;
import org.apache.hadoop.util.IdentityHashStore;
/** Utility that wraps a {@link FSInputStream} in a {@link DataInputStream}
* and buffers input through a {@link BufferedInputStream}. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FSDataInputStream extends DataInputStream
implements Seekable, PositionedReadable,
ByteBufferReadable, HasFileDescriptor, CanSetDropBehind, CanSetReadahead,
HasEnhancedByteBufferAccess, CanUnbuffer {
/**
* Map ByteBuffers that we have handed out to readers to ByteBufferPool
* objects
*/
private final IdentityHashStore<ByteBuffer, ByteBufferPool>
extendedReadBuffers
= new IdentityHashStore<ByteBuffer, ByteBufferPool>(0);
public FSDataInputStream(InputStream in) {
super(in);
if( !(in instanceof Seekable) || !(in instanceof PositionedReadable) ) {
throw new IllegalArgumentException(
"In is not an instance of Seekable or PositionedReadable");
}
}
/**
* Seek to the given offset.
*
* @param desired offset to seek to
*/
@Override
public void seek(long desired) throws IOException {
((Seekable)in).seek(desired);
}
/**
* Get the current position in the input stream.
*
* @return current position in the input stream
*/
@Override
public long getPos() throws IOException {
return ((Seekable)in).getPos();
}
/**
* Read bytes from the given position in the stream to the given buffer.
*
* @param position position in the input stream to seek
* @param buffer buffer into which data is read
* @param offset offset into the buffer in which data is written
* @param length maximum number of bytes to read
* @return total number of bytes read into the buffer, or <code>-1</code>
* if there is no more data because the end of the stream has been
* reached
*/
@Override
public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
return ((PositionedReadable)in).read(position, buffer, offset, length);
}
/**
* Read bytes from the given position in the stream to the given buffer.
* Continues to read until <code>length</code> bytes have been read.
*
* @param position position in the input stream to seek
* @param buffer buffer into which data is read
* @param offset offset into the buffer in which data is written
* @param length the number of bytes to read
* @throws EOFException If the end of stream is reached while reading.
* If an exception is thrown an undetermined number
* of bytes in the buffer may have been written.
*/
@Override
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException {
((PositionedReadable)in).readFully(position, buffer, offset, length);
}
/**
* See {@link #readFully(long, byte[], int, int)}.
*/
@Override
public void readFully(long position, byte[] buffer)
throws IOException {
((PositionedReadable)in).readFully(position, buffer, 0, buffer.length);
}
/**
* Seek to the given position on an alternate copy of the data.
*
* @param targetPos position to seek to
* @return true if a new source is found, false otherwise
*/
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
return ((Seekable)in).seekToNewSource(targetPos);
}
/**
* Get a reference to the wrapped input stream. Used by unit tests.
*
* @return the underlying input stream
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
public InputStream getWrappedStream() {
return in;
}
@Override
public int read(ByteBuffer buf) throws IOException {
if (in instanceof ByteBufferReadable) {
return ((ByteBufferReadable)in).read(buf);
}
throw new UnsupportedOperationException("Byte-buffer read unsupported by input stream");
}
@Override
public FileDescriptor getFileDescriptor() throws IOException {
if (in instanceof HasFileDescriptor) {
return ((HasFileDescriptor) in).getFileDescriptor();
} else if (in instanceof FileInputStream) {
return ((FileInputStream) in).getFD();
} else {
return null;
}
}
@Override
public void setReadahead(Long readahead)
throws IOException, UnsupportedOperationException {
try {
((CanSetReadahead)in).setReadahead(readahead);
} catch (ClassCastException e) {
throw new UnsupportedOperationException(
"this stream does not support setting the readahead " +
"caching strategy.");
}
}
@Override
public void setDropBehind(Boolean dropBehind)
throws IOException, UnsupportedOperationException {
try {
((CanSetDropBehind)in).setDropBehind(dropBehind);
} catch (ClassCastException e) {
throw new UnsupportedOperationException("this stream does not " +
"support setting the drop-behind caching setting.");
}
}
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
EnumSet<ReadOption> opts)
throws IOException, UnsupportedOperationException {
try {
return ((HasEnhancedByteBufferAccess)in).read(bufferPool,
maxLength, opts);
}
catch (ClassCastException e) {
ByteBuffer buffer = ByteBufferUtil.
fallbackRead(this, bufferPool, maxLength);
if (buffer != null) {
extendedReadBuffers.put(buffer, bufferPool);
}
return buffer;
}
}
private static final EnumSet<ReadOption> EMPTY_READ_OPTIONS_SET =
EnumSet.noneOf(ReadOption.class);
final public ByteBuffer read(ByteBufferPool bufferPool, int maxLength)
throws IOException, UnsupportedOperationException {
return read(bufferPool, maxLength, EMPTY_READ_OPTIONS_SET);
}
@Override
public void releaseBuffer(ByteBuffer buffer) {
try {
((HasEnhancedByteBufferAccess)in).releaseBuffer(buffer);
}
catch (ClassCastException e) {
ByteBufferPool bufferPool = extendedReadBuffers.remove( buffer);
if (bufferPool == null) {
throw new IllegalArgumentException("tried to release a buffer " +
"that was not created by this stream.");
}
bufferPool.putBuffer(buffer);
}
}
@Override
public void unbuffer() {
try {
((CanUnbuffer)in).unbuffer();
} catch (ClassCastException e) {
throw new UnsupportedOperationException("this stream does not " +
"support unbuffering.");
}
}
}
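// Hedged usage sketch (not part of the original source): FSDataInputStream
// is normally obtained from FileSystem#open() rather than constructed
// directly. The path and offsets are illustrative assumptions.
class FSDataInputStreamUsageSketch {
  static void demo(FileSystem fs, Path p) throws IOException {
    FSDataInputStream in = fs.open(p);
    try {
      in.seek(1024);                       // delegates to the wrapped Seekable
      byte[] buf = new byte[512];
      int n = in.read(buf, 0, buf.length); // sequential read at offset 1024
      in.readFully(0, buf);                // positioned read; does not move the
                                           // sequential stream position
    } finally {
      in.close();
    }
  }
}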
| 7,688 | 31.858974 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.*;
import java.util.*;
import org.apache.commons.logging.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/** An implementation of a round-robin scheme for disk allocation for creating
 * files. It keeps track of which disk was last
 * allocated for a file write. For the current request, the next disk from
 * the set of disks is allocated if the free space on the disk is
 * sufficient to accommodate the file that is being considered for
 * creation. If the space requirements cannot be met, the next disk in order
 * is tried, and so on, until a disk is found with sufficient capacity.
 * Once a disk with sufficient space is identified, a check is done to make
 * sure that the disk is writable. Also, there is an API provided that doesn't
 * take the space requirements into consideration but just checks whether the
 * disk under consideration is writable (this should be used for cases where
 * the file size is not known a priori). An API is provided to read a path that
* was created earlier. That API works by doing a scan of all the disks for the
* input pathname.
* This implementation also provides the functionality of having multiple
* allocators per JVM (one for each unique functionality or context, like
* mapred, dfs-client, etc.). It ensures that there is only one instance of
* an allocator per context per JVM.
* Note:
* 1. The contexts referred above are actually the configuration items defined
* in the Configuration class like "mapred.local.dir" (for which we want to
* control the dir allocations). The context-strings are exactly those
* configuration items.
* 2. This implementation does not take into consideration cases where
* a disk becomes read-only or goes out of space while a file is being written
* to (disks are shared between multiple processes, and so the latter situation
* is probable).
* 3. In the class implementation, "Disk" is referred to as "Dir", which
* actually points to the configured directory on the Disk which will be the
* parent for all file write/read allocations.
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public class LocalDirAllocator {
//A Map from the config item names like "mapred.local.dir"
//to the instance of the AllocatorPerContext. This
//is a static object to make sure there exists exactly one instance per JVM
private static Map <String, AllocatorPerContext> contexts =
new TreeMap<String, AllocatorPerContext>();
private String contextCfgItemName;
/** Used when size of file to be allocated is unknown. */
public static final int SIZE_UNKNOWN = -1;
  /** Create an allocator object.
   * @param contextCfgItemName the name of the configuration item whose
   *        value lists the local directories (e.g. "mapred.local.dir")
   */
public LocalDirAllocator(String contextCfgItemName) {
this.contextCfgItemName = contextCfgItemName;
}
/** This method must be used to obtain the dir allocation context for a
* particular value of the context name. The context name must be an item
* defined in the Configuration object for which we want to control the
* dir allocations (e.g., <code>mapred.local.dir</code>). The method will
* create a context for that name if it doesn't already exist.
*/
private AllocatorPerContext obtainContext(String contextCfgItemName) {
synchronized (contexts) {
AllocatorPerContext l = contexts.get(contextCfgItemName);
if (l == null) {
contexts.put(contextCfgItemName,
(l = new AllocatorPerContext(contextCfgItemName)));
}
return l;
}
}
/** Get a path from the local FS. This method should be used if the size of
   * the file is not known a priori. We go round-robin over the set of disks
* (via the configured dirs) and return the first complete path where
* we could create the parent directory of the passed path.
* @param pathStr the requested path (this will be created on the first
* available disk)
* @param conf the Configuration object
* @return the complete path to the file on a local disk
* @throws IOException
*/
public Path getLocalPathForWrite(String pathStr,
Configuration conf) throws IOException {
return getLocalPathForWrite(pathStr, SIZE_UNKNOWN, conf);
}
/** Get a path from the local FS. Pass size as
   * SIZE_UNKNOWN if not known a priori. We
* round-robin over the set of disks (via the configured dirs) and return
* the first complete path which has enough space
* @param pathStr the requested path (this will be created on the first
* available disk)
* @param size the size of the file that is going to be written
* @param conf the Configuration object
* @return the complete path to the file on a local disk
* @throws IOException
*/
public Path getLocalPathForWrite(String pathStr, long size,
Configuration conf) throws IOException {
return getLocalPathForWrite(pathStr, size, conf, true);
}
/** Get a path from the local FS. Pass size as
   * SIZE_UNKNOWN if not known a priori. We
* round-robin over the set of disks (via the configured dirs) and return
* the first complete path which has enough space
* @param pathStr the requested path (this will be created on the first
* available disk)
* @param size the size of the file that is going to be written
* @param conf the Configuration object
* @param checkWrite ensure that the path is writable
* @return the complete path to the file on a local disk
* @throws IOException
*/
public Path getLocalPathForWrite(String pathStr, long size,
Configuration conf,
boolean checkWrite) throws IOException {
AllocatorPerContext context = obtainContext(contextCfgItemName);
return context.getLocalPathForWrite(pathStr, size, conf, checkWrite);
}
/** Get a path from the local FS for reading. We search through all the
* configured dirs for the file's existence and return the complete
* path to the file when we find one
* @param pathStr the requested file (this will be searched)
* @param conf the Configuration object
* @return the complete path to the file on a local disk
* @throws IOException
*/
public Path getLocalPathToRead(String pathStr,
Configuration conf) throws IOException {
AllocatorPerContext context = obtainContext(contextCfgItemName);
return context.getLocalPathToRead(pathStr, conf);
}
/**
* Get all of the paths that currently exist in the working directories.
* @param pathStr the path underneath the roots
* @param conf the configuration to look up the roots in
* @return all of the paths that exist under any of the roots
* @throws IOException
*/
public Iterable<Path> getAllLocalPathsToRead(String pathStr,
Configuration conf
) throws IOException {
AllocatorPerContext context;
synchronized (this) {
context = obtainContext(contextCfgItemName);
}
return context.getAllLocalPathsToRead(pathStr, conf);
}
/** Creates a temporary file in the local FS. Pass size as -1 if not known
   * a priori. We round-robin over the set of disks (via the configured dirs)
* and select the first complete path which has enough space. A file is
* created on this directory. The file is guaranteed to go away when the
* JVM exits.
* @param pathStr prefix for the temporary file
* @param size the size of the file that is going to be written
* @param conf the Configuration object
* @return a unique temporary file
* @throws IOException
*/
public File createTmpFileForWrite(String pathStr, long size,
Configuration conf) throws IOException {
AllocatorPerContext context = obtainContext(contextCfgItemName);
return context.createTmpFileForWrite(pathStr, size, conf);
}
  /** Method to check whether a context is valid.
   * @param contextCfgItemName the context name to check
   * @return true if the context is valid, false otherwise
   */
public static boolean isContextValid(String contextCfgItemName) {
synchronized (contexts) {
return contexts.containsKey(contextCfgItemName);
}
}
/**
* Removes the context from the context config items
*
* @param contextCfgItemName
*/
@Deprecated
@InterfaceAudience.LimitedPrivate({"MapReduce"})
public static void removeContext(String contextCfgItemName) {
synchronized (contexts) {
contexts.remove(contextCfgItemName);
}
}
  /** We search through all the configured dirs for the file's existence
   * and return true when we find one.
* @param pathStr the requested file (this will be searched)
* @param conf the Configuration object
* @return true if files exist. false otherwise
* @throws IOException
*/
public boolean ifExists(String pathStr,Configuration conf) {
AllocatorPerContext context = obtainContext(contextCfgItemName);
return context.ifExists(pathStr, conf);
}
/**
* Get the current directory index for the given configuration item.
* @return the current directory index for the given configuration item.
*/
int getCurrentDirectoryIndex() {
AllocatorPerContext context = obtainContext(contextCfgItemName);
return context.getCurrentDirectoryIndex();
}
private static class AllocatorPerContext {
private final Log LOG =
LogFactory.getLog(AllocatorPerContext.class);
private int dirNumLastAccessed;
private Random dirIndexRandomizer = new Random();
private FileSystem localFS;
private DF[] dirDF;
private String contextCfgItemName;
private String[] localDirs;
private String savedLocalDirs = "";
public AllocatorPerContext(String contextCfgItemName) {
this.contextCfgItemName = contextCfgItemName;
}
    /** This method gets called every time before any read/write to make sure
* that any change to localDirs is reflected immediately.
*/
private synchronized void confChanged(Configuration conf)
throws IOException {
String newLocalDirs = conf.get(contextCfgItemName);
if (!newLocalDirs.equals(savedLocalDirs)) {
localDirs = StringUtils.getTrimmedStrings(newLocalDirs);
localFS = FileSystem.getLocal(conf);
int numDirs = localDirs.length;
ArrayList<String> dirs = new ArrayList<String>(numDirs);
ArrayList<DF> dfList = new ArrayList<DF>(numDirs);
for (int i = 0; i < numDirs; i++) {
try {
// filter problematic directories
Path tmpDir = new Path(localDirs[i]);
if(localFS.mkdirs(tmpDir)|| localFS.exists(tmpDir)) {
try {
File tmpFile = tmpDir.isAbsolute()
? new File(localFS.makeQualified(tmpDir).toUri())
: new File(localDirs[i]);
DiskChecker.checkDir(tmpFile);
dirs.add(tmpFile.getPath());
dfList.add(new DF(tmpFile, 30000));
} catch (DiskErrorException de) {
LOG.warn( localDirs[i] + " is not writable\n", de);
}
} else {
LOG.warn( "Failed to create " + localDirs[i]);
}
} catch (IOException ie) {
LOG.warn( "Failed to create " + localDirs[i] + ": " +
ie.getMessage() + "\n", ie);
} //ignore
}
localDirs = dirs.toArray(new String[dirs.size()]);
dirDF = dfList.toArray(new DF[dirs.size()]);
savedLocalDirs = newLocalDirs;
// randomize the first disk picked in the round-robin selection
dirNumLastAccessed = dirIndexRandomizer.nextInt(dirs.size());
}
}
private Path createPath(String path,
boolean checkWrite) throws IOException {
Path file = new Path(new Path(localDirs[dirNumLastAccessed]),
path);
if (checkWrite) {
//check whether we are able to create a directory here. If the disk
//happens to be RDONLY we will fail
try {
DiskChecker.checkDir(new File(file.getParent().toUri().getPath()));
return file;
} catch (DiskErrorException d) {
LOG.warn("Disk Error Exception: ", d);
return null;
}
}
return file;
}
/**
* Get the current directory index.
* @return the current directory index.
*/
int getCurrentDirectoryIndex() {
return dirNumLastAccessed;
}
/** Get a path from the local FS. If size is known, we go
* round-robin over the set of disks (via the configured dirs) and return
* the first complete path which has enough space.
*
* If size is not known, use roulette selection -- pick directories
* with probability proportional to their available space.
*/
public synchronized Path getLocalPathForWrite(String pathStr, long size,
Configuration conf, boolean checkWrite) throws IOException {
confChanged(conf);
int numDirs = localDirs.length;
int numDirsSearched = 0;
//remove the leading slash from the path (to make sure that the uri
//resolution results in a valid path on the dir being checked)
if (pathStr.startsWith("/")) {
pathStr = pathStr.substring(1);
}
Path returnPath = null;
if(size == SIZE_UNKNOWN) { //do roulette selection: pick dir with probability
//proportional to available size
long[] availableOnDisk = new long[dirDF.length];
long totalAvailable = 0;
//build the "roulette wheel"
for(int i =0; i < dirDF.length; ++i) {
availableOnDisk[i] = dirDF[i].getAvailable();
totalAvailable += availableOnDisk[i];
}
if (totalAvailable == 0){
throw new DiskErrorException("No space available in any of the local directories.");
}
// Keep rolling the wheel till we get a valid path
Random r = new java.util.Random();
while (numDirsSearched < numDirs && returnPath == null) {
long randomPosition = (r.nextLong() >>> 1) % totalAvailable;
int dir = 0;
while (randomPosition > availableOnDisk[dir]) {
randomPosition -= availableOnDisk[dir];
dir++;
}
dirNumLastAccessed = dir;
returnPath = createPath(pathStr, checkWrite);
if (returnPath == null) {
totalAvailable -= availableOnDisk[dir];
availableOnDisk[dir] = 0; // skip this disk
numDirsSearched++;
}
}
} else {
while (numDirsSearched < numDirs && returnPath == null) {
long capacity = dirDF[dirNumLastAccessed].getAvailable();
if (capacity > size) {
returnPath = createPath(pathStr, checkWrite);
}
dirNumLastAccessed++;
dirNumLastAccessed = dirNumLastAccessed % numDirs;
numDirsSearched++;
}
}
if (returnPath != null) {
return returnPath;
}
//no path found
throw new DiskErrorException("Could not find any valid local " +
"directory for " + pathStr);
}
    /** Creates a file on the local FS. Pass size as
     * {@link LocalDirAllocator#SIZE_UNKNOWN} if not known a priori. We
* round-robin over the set of disks (via the configured dirs) and return
* a file on the first path which has enough space. The file is guaranteed
* to go away when the JVM exits.
*/
public File createTmpFileForWrite(String pathStr, long size,
Configuration conf) throws IOException {
// find an appropriate directory
Path path = getLocalPathForWrite(pathStr, size, conf, true);
File dir = new File(path.getParent().toUri().getPath());
String prefix = path.getName();
// create a temp file on this directory
File result = File.createTempFile(prefix, null, dir);
result.deleteOnExit();
return result;
}
/** Get a path from the local FS for reading. We search through all the
* configured dirs for the file's existence and return the complete
* path to the file when we find one
*/
public synchronized Path getLocalPathToRead(String pathStr,
Configuration conf) throws IOException {
confChanged(conf);
int numDirs = localDirs.length;
int numDirsSearched = 0;
//remove the leading slash from the path (to make sure that the uri
//resolution results in a valid path on the dir being checked)
if (pathStr.startsWith("/")) {
pathStr = pathStr.substring(1);
}
while (numDirsSearched < numDirs) {
Path file = new Path(localDirs[numDirsSearched], pathStr);
if (localFS.exists(file)) {
return file;
}
numDirsSearched++;
}
//no path found
throw new DiskErrorException ("Could not find " + pathStr +" in any of" +
" the configured local directories");
}
private static class PathIterator implements Iterator<Path>, Iterable<Path> {
private final FileSystem fs;
private final String pathStr;
private int i = 0;
private final String[] rootDirs;
private Path next = null;
private PathIterator(FileSystem fs, String pathStr, String[] rootDirs)
throws IOException {
this.fs = fs;
this.pathStr = pathStr;
this.rootDirs = rootDirs;
advance();
}
@Override
public boolean hasNext() {
return next != null;
}
private void advance() throws IOException {
while (i < rootDirs.length) {
next = new Path(rootDirs[i++], pathStr);
if (fs.exists(next)) {
return;
}
}
next = null;
}
@Override
public Path next() {
final Path result = next;
try {
advance();
} catch (IOException ie) {
throw new RuntimeException("Can't check existance of " + next, ie);
}
if (result == null) {
throw new NoSuchElementException();
}
return result;
}
@Override
public void remove() {
throw new UnsupportedOperationException("read only iterator");
}
@Override
public Iterator<Path> iterator() {
return this;
}
}
/**
* Get all of the paths that currently exist in the working directories.
* @param pathStr the path underneath the roots
* @param conf the configuration to look up the roots in
* @return all of the paths that exist under any of the roots
* @throws IOException
*/
synchronized Iterable<Path> getAllLocalPathsToRead(String pathStr,
Configuration conf) throws IOException {
confChanged(conf);
if (pathStr.startsWith("/")) {
pathStr = pathStr.substring(1);
}
return new PathIterator(localFS, pathStr, localDirs);
}
/** We search through all the configured dirs for the file's existence
* and return true when we find one
*/
public synchronized boolean ifExists(String pathStr,Configuration conf) {
try {
int numDirs = localDirs.length;
int numDirsSearched = 0;
//remove the leading slash from the path (to make sure that the uri
//resolution results in a valid path on the dir being checked)
if (pathStr.startsWith("/")) {
pathStr = pathStr.substring(1);
}
while (numDirsSearched < numDirs) {
Path file = new Path(localDirs[numDirsSearched], pathStr);
if (localFS.exists(file)) {
return true;
}
numDirsSearched++;
}
} catch (IOException e) {
// IGNORE and try again
}
return false;
}
}
}
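// Hedged usage sketch (not part of the original source): round-robin
// allocation across the directories listed under a configuration item.
// The config key, relative path and size are illustrative assumptions.
class LocalDirAllocatorUsageSketch {
  static void demo(Configuration conf) throws IOException {
    LocalDirAllocator alloc = new LocalDirAllocator("mapred.local.dir");
    // pick a dir with at least 1 MB free and return a writable path on it
    Path out = alloc.getLocalPathForWrite("job_1/spill0.out", 1 << 20, conf);
    // later, locate the same relative path by scanning all configured dirs
    Path in = alloc.getLocalPathToRead("job_1/spill0.out", conf);
  }
}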
| 21,100 | 37.365455 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** Thrown for checksum errors. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class ChecksumException extends IOException {
private static final long serialVersionUID = 1L;
private long pos;
public ChecksumException(String description, long pos) {
super(description);
this.pos = pos;
}
public long getPos() {
return pos;
}
}
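// Hedged usage sketch (not part of the original source): callers of
// checksum-verified reads can recover the failing byte offset from the
// exception. The stream parameter is an illustrative assumption.
class ChecksumExceptionUsageSketch {
  static int readVerified(java.io.InputStream in, byte[] buf)
      throws IOException {
    try {
      return in.read(buf, 0, buf.length);
    } catch (ChecksumException ce) {
      System.err.println("corrupt data at byte offset " + ce.getPos());
      throw ce; // or seek to a new source and retry, as FSInputChecker does
    }
  }
}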
| 1,357 | 32.121951 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ZeroCopyUnavailableException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
public class ZeroCopyUnavailableException extends IOException {
private static final long serialVersionUID = 0L;
public ZeroCopyUnavailableException(String message) {
super(message);
}
public ZeroCopyUnavailableException(String message, Exception e) {
super(message, e);
}
public ZeroCopyUnavailableException(Exception e) {
super(e);
}
}
| 1,234 | 32.378378 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
/** Interface that represents the client side information for a file.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FileStatus implements Writable, Comparable<FileStatus> {
private Path path;
private long length;
private boolean isdir;
private short block_replication;
private long blocksize;
private long modification_time;
private long access_time;
private FsPermission permission;
private String owner;
private String group;
private Path symlink;
public FileStatus() { this(0, false, 0, 0, 0, 0, null, null, null, null); }
//We should deprecate this soon?
public FileStatus(long length, boolean isdir, int block_replication,
long blocksize, long modification_time, Path path) {
this(length, isdir, block_replication, blocksize, modification_time,
0, null, null, null, path);
}
/**
* Constructor for file systems on which symbolic links are not supported
*/
public FileStatus(long length, boolean isdir,
int block_replication,
long blocksize, long modification_time, long access_time,
FsPermission permission, String owner, String group,
Path path) {
this(length, isdir, block_replication, blocksize, modification_time,
access_time, permission, owner, group, null, path);
}
public FileStatus(long length, boolean isdir,
int block_replication,
long blocksize, long modification_time, long access_time,
FsPermission permission, String owner, String group,
Path symlink,
Path path) {
this.length = length;
this.isdir = isdir;
this.block_replication = (short)block_replication;
this.blocksize = blocksize;
this.modification_time = modification_time;
this.access_time = access_time;
if (permission != null) {
this.permission = permission;
} else if (isdir) {
this.permission = FsPermission.getDirDefault();
} else if (symlink!=null) {
this.permission = FsPermission.getDefault();
} else {
this.permission = FsPermission.getFileDefault();
}
this.owner = (owner == null) ? "" : owner;
this.group = (group == null) ? "" : group;
this.symlink = symlink;
this.path = path;
// The variables isdir and symlink indicate the type:
// 1. isdir implies directory, in which case symlink must be null.
// 2. !isdir implies a file or symlink, symlink != null implies a
// symlink, otherwise it's a file.
assert (isdir && symlink == null) || !isdir;
}
/**
* Copy constructor.
*
* @param other FileStatus to copy
*/
public FileStatus(FileStatus other) throws IOException {
// It's important to call the getters here instead of directly accessing the
// members. Subclasses like ViewFsFileStatus can override the getters.
this(other.getLen(), other.isDirectory(), other.getReplication(),
other.getBlockSize(), other.getModificationTime(), other.getAccessTime(),
other.getPermission(), other.getOwner(), other.getGroup(),
(other.isSymlink() ? other.getSymlink() : null),
other.getPath());
}
/**
* Get the length of this file, in bytes.
* @return the length of this file, in bytes.
*/
public long getLen() {
return length;
}
/**
* Is this a file?
* @return true if this is a file
*/
public boolean isFile() {
return !isdir && !isSymlink();
}
/**
* Is this a directory?
* @return true if this is a directory
*/
public boolean isDirectory() {
return isdir;
}
/**
* Old interface, instead use the explicit {@link FileStatus#isFile()},
* {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
* @return true if this is a directory.
* @deprecated Use {@link FileStatus#isFile()},
* {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
* instead.
*/
@Deprecated
public boolean isDir() {
return isdir;
}
/**
* Is this a symbolic link?
* @return true if this is a symbolic link
*/
public boolean isSymlink() {
return symlink != null;
}
/**
* Get the block size of the file.
* @return the number of bytes
*/
public long getBlockSize() {
return blocksize;
}
/**
* Get the replication factor of a file.
* @return the replication factor of a file.
*/
public short getReplication() {
return block_replication;
}
/**
* Get the modification time of the file.
* @return the modification time of file in milliseconds since January 1, 1970 UTC.
*/
public long getModificationTime() {
return modification_time;
}
/**
* Get the access time of the file.
* @return the access time of file in milliseconds since January 1, 1970 UTC.
*/
public long getAccessTime() {
return access_time;
}
/**
* Get FsPermission associated with the file.
   * @return permission. If a filesystem does not have a notion of permissions
   *         or if permissions could not be determined, then the default
   *         permission, equivalent to "rwxrwxrwx", is returned.
*/
public FsPermission getPermission() {
return permission;
}
/**
* Tell whether the underlying file or directory is encrypted or not.
*
* @return true if the underlying file is encrypted.
*/
public boolean isEncrypted() {
return permission.getEncryptedBit();
}
/**
* Get the owner of the file.
* @return owner of the file. The string could be empty if there is no
* notion of owner of a file in a filesystem or if it could not
* be determined (rare).
*/
public String getOwner() {
return owner;
}
/**
* Get the group associated with the file.
* @return group for the file. The string could be empty if there is no
* notion of group of a file in a filesystem or if it could not
* be determined (rare).
*/
public String getGroup() {
return group;
}
public Path getPath() {
return path;
}
public void setPath(final Path p) {
path = p;
}
/* These are provided so that these values could be loaded lazily
* by a filesystem (e.g. local file system).
*/
/**
* Sets permission.
* @param permission if permission is null, default value is set
*/
protected void setPermission(FsPermission permission) {
this.permission = (permission == null) ?
FsPermission.getFileDefault() : permission;
}
/**
* Sets owner.
* @param owner if it is null, default value is set
*/
protected void setOwner(String owner) {
this.owner = (owner == null) ? "" : owner;
}
/**
* Sets group.
* @param group if it is null, default value is set
*/
protected void setGroup(String group) {
this.group = (group == null) ? "" : group;
}
/**
* @return The contents of the symbolic link.
*/
public Path getSymlink() throws IOException {
if (!isSymlink()) {
throw new IOException("Path " + path + " is not a symbolic link");
}
return symlink;
}
public void setSymlink(final Path p) {
symlink = p;
}
//////////////////////////////////////////////////
// Writable
//////////////////////////////////////////////////
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, getPath().toString(), Text.DEFAULT_MAX_LEN);
out.writeLong(getLen());
out.writeBoolean(isDirectory());
out.writeShort(getReplication());
out.writeLong(getBlockSize());
out.writeLong(getModificationTime());
out.writeLong(getAccessTime());
getPermission().write(out);
Text.writeString(out, getOwner(), Text.DEFAULT_MAX_LEN);
Text.writeString(out, getGroup(), Text.DEFAULT_MAX_LEN);
out.writeBoolean(isSymlink());
if (isSymlink()) {
Text.writeString(out, getSymlink().toString(), Text.DEFAULT_MAX_LEN);
}
}
@Override
public void readFields(DataInput in) throws IOException {
String strPath = Text.readString(in, Text.DEFAULT_MAX_LEN);
this.path = new Path(strPath);
this.length = in.readLong();
this.isdir = in.readBoolean();
this.block_replication = in.readShort();
blocksize = in.readLong();
modification_time = in.readLong();
access_time = in.readLong();
permission.readFields(in);
owner = Text.readString(in, Text.DEFAULT_MAX_LEN);
group = Text.readString(in, Text.DEFAULT_MAX_LEN);
if (in.readBoolean()) {
this.symlink = new Path(Text.readString(in, Text.DEFAULT_MAX_LEN));
} else {
this.symlink = null;
}
}
/**
* Compare this FileStatus to another FileStatus
* @param o the FileStatus to be compared.
* @return a negative integer, zero, or a positive integer as this object
* is less than, equal to, or greater than the specified object.
*/
@Override
public int compareTo(FileStatus o) {
return this.getPath().compareTo(o.getPath());
}
  /** Check if this object is equal to another object.
   * @param o the object to be compared.
   * @return true if the two file statuses have the same path name; false if not.
*/
@Override
public boolean equals(Object o) {
if (o == null) {
return false;
}
if (this == o) {
return true;
}
if (!(o instanceof FileStatus)) {
return false;
}
FileStatus other = (FileStatus)o;
return this.getPath().equals(other.getPath());
}
/**
* Returns a hash code value for the object, which is defined as
* the hash code of the path name.
*
* @return a hash code value for the path name.
*/
@Override
public int hashCode() {
return getPath().hashCode();
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(getClass().getSimpleName());
sb.append("{");
sb.append("path=" + path);
sb.append("; isDirectory=" + isdir);
if(!isDirectory()){
sb.append("; length=" + length);
sb.append("; replication=" + block_replication);
sb.append("; blocksize=" + blocksize);
}
sb.append("; modification_time=" + modification_time);
sb.append("; access_time=" + access_time);
sb.append("; owner=" + owner);
sb.append("; group=" + group);
sb.append("; permission=" + permission);
sb.append("; isSymlink=" + isSymlink());
if(isSymlink()) {
sb.append("; symlink=" + symlink);
}
sb.append("}");
return sb.toString();
}
}
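// Hedged usage sketch (not part of the original source): FileStatus
// implements Writable, so it round-trips through Data(Output|Input) streams.
class FileStatusUsageSketch {
  static FileStatus roundTrip(FileStatus status) throws IOException {
    java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
    status.write(new java.io.DataOutputStream(bytes));       // serialize
    FileStatus copy = new FileStatus();
    copy.readFields(new java.io.DataInputStream(
        new java.io.ByteArrayInputStream(bytes.toByteArray()))); // deserialize
    return copy; // equal to status by path, see equals() above
  }
}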
| 11,756 | 29.069054 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlStreamHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.net.URL;
import java.net.URLStreamHandler;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
 * URL stream handler relying on FileSystem and on a given Configuration to
* handle URL protocols.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
class FsUrlStreamHandler extends URLStreamHandler {
private Configuration conf;
FsUrlStreamHandler(Configuration conf) {
this.conf = conf;
}
FsUrlStreamHandler() {
this.conf = new Configuration();
}
@Override
protected FsUrlConnection openConnection(URL url) throws IOException {
return new FsUrlConnection(conf, url);
}
}
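// Hedged usage sketch (not part of the original source): this handler is
// normally installed JVM-wide via FsUrlStreamHandlerFactory (same package),
// after which java.net.URL can open Hadoop filesystem URLs. The hdfs URI
// below is an illustrative assumption.
class FsUrlStreamHandlerUsageSketch {
  static void demo() throws IOException {
    // may only be called once per JVM
    URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
    java.io.InputStream in =
        new URL("hdfs://namenode:8020/user/foo/part-00000").openStream();
    in.close();
  }
}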
| 1,607 | 29.923077 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InvalidPathException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Path string is invalid either because it has invalid characters or due to
* other file system specific reasons.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class InvalidPathException extends HadoopIllegalArgumentException {
private static final long serialVersionUID = 1L;
/**
* Constructs exception with the specified detail message.
*
* @param path invalid path.
*/
public InvalidPathException(final String path) {
super("Invalid path name " + path);
}
/**
* Constructs exception with the specified detail message.
*
* @param path invalid path.
* @param reason Reason <code>path</code> is invalid
*/
public InvalidPathException(final String path, final String reason) {
super("Invalid path " + path
+ (reason == null ? "" : ". (" + reason + ")"));
}
}
| 1,858 | 34.075472 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.*;
import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
import java.util.jar.Attributes;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
import java.util.zip.GZIPInputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import org.apache.commons.collections.map.CaseInsensitiveMap;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
 * A collection of file-processing utility methods.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class FileUtil {
private static final Log LOG = LogFactory.getLog(FileUtil.class);
/* The error code is defined in winutils to indicate insufficient
   * privilege to create symbolic links. This value needs to be kept in
   * sync with the constant of the same name in:
* "src\winutils\common.h"
* */
public static final int SYMLINK_NO_PRIVILEGE = 2;
/**
* convert an array of FileStatus to an array of Path
*
* @param stats
* an array of FileStatus objects
* @return an array of paths corresponding to the input
*/
public static Path[] stat2Paths(FileStatus[] stats) {
if (stats == null)
return null;
Path[] ret = new Path[stats.length];
for (int i = 0; i < stats.length; ++i) {
ret[i] = stats[i].getPath();
}
return ret;
}
/**
   * Convert an array of FileStatus to an array of Path.
   * If stats is null, return an array containing the default path.
   * @param stats
   *          an array of FileStatus objects
   * @param path
   *          default path to return if stats is null
* @return an array of paths corresponding to the input
*/
public static Path[] stat2Paths(FileStatus[] stats, Path path) {
if (stats == null)
return new Path[]{path};
else
return stat2Paths(stats);
}
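  // Usage sketch (illustrative; the FileSystem and directory are assumptions):
  //
  //   FileStatus[] stats = fs.listStatus(new Path("/user/alice"));
  //   Path[] paths = FileUtil.stat2Paths(stats, new Path("/user/alice"));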
/**
* Delete a directory and all its contents. If
* we return false, the directory may be partially-deleted.
   * (1) If dir is a symlink to a file, the symlink is deleted. The file pointed
   * to by the symlink is not deleted.
   * (2) If dir is a symlink to a directory, the symlink is deleted. The
   * directory pointed to by the symlink is not deleted.
* (3) If dir is a normal file, it is deleted.
* (4) If dir is a normal directory, then dir and all its contents recursively
* are deleted.
*/
public static boolean fullyDelete(final File dir) {
return fullyDelete(dir, false);
}
/**
* Delete a directory and all its contents. If
* we return false, the directory may be partially-deleted.
   * (1) If dir is a symlink to a file, the symlink is deleted. The file pointed
   * to by the symlink is not deleted.
   * (2) If dir is a symlink to a directory, the symlink is deleted. The
   * directory pointed to by the symlink is not deleted.
* (3) If dir is a normal file, it is deleted.
* (4) If dir is a normal directory, then dir and all its contents recursively
* are deleted.
* @param dir the file or directory to be deleted
* @param tryGrantPermissions true if permissions should be modified to delete a file.
* @return true on success false on failure.
*/
public static boolean fullyDelete(final File dir, boolean tryGrantPermissions) {
if (tryGrantPermissions) {
// try to chmod +rwx the parent folder of the 'dir':
File parent = dir.getParentFile();
grantPermissions(parent);
}
if (deleteImpl(dir, false)) {
// dir is (a) normal file, (b) symlink to a file, (c) empty directory or
// (d) symlink to a directory
return true;
}
// handle nonempty directory deletion
if (!fullyDeleteContents(dir, tryGrantPermissions)) {
return false;
}
return deleteImpl(dir, true);
}
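  // Usage sketch (illustrative; "scratchDir" is an assumed local directory):
  //
  //   File scratchDir = new File("/tmp/job-scratch");
  //   // best-effort recursive delete, granting +rwx where needed:
  //   boolean deleted = FileUtil.fullyDelete(scratchDir, true);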
/**
* Returns the target of the given symlink. Returns the empty string if
* the given path does not refer to a symlink or there is an error
* accessing the symlink.
* @param f File representing the symbolic link.
* @return The target of the symbolic link, empty string on error or if not
* a symlink.
*/
public static String readLink(File f) {
/* NB: Use readSymbolicLink in java.nio.file.Path once available. Could
* use getCanonicalPath in File to get the target of the symlink but that
* does not indicate if the given path refers to a symlink.
*/
try {
return Shell.execCommand(
Shell.getReadlinkCommand(f.toString())).trim();
} catch (IOException x) {
return "";
}
}
/*
* Pure-Java implementation of "chmod +rwx f".
*/
private static void grantPermissions(final File f) {
FileUtil.setExecutable(f, true);
FileUtil.setReadable(f, true);
FileUtil.setWritable(f, true);
}
private static boolean deleteImpl(final File f, final boolean doLog) {
if (f == null) {
LOG.warn("null file argument.");
return false;
}
final boolean wasDeleted = f.delete();
if (wasDeleted) {
return true;
}
final boolean ex = f.exists();
if (doLog && ex) {
LOG.warn("Failed to delete file or dir ["
+ f.getAbsolutePath() + "]: it still exists.");
}
return !ex;
}
/**
* Delete the contents of a directory, not the directory itself. If
* we return false, the directory may be partially-deleted.
* If dir is a symlink to a directory, all the contents of the actual
* directory pointed to by dir will be deleted.
*/
public static boolean fullyDeleteContents(final File dir) {
return fullyDeleteContents(dir, false);
}
/**
* Delete the contents of a directory, not the directory itself. If
* we return false, the directory may be partially-deleted.
* If dir is a symlink to a directory, all the contents of the actual
* directory pointed to by dir will be deleted.
* @param tryGrantPermissions if 'true', try grant +rwx permissions to this
* and all the underlying directories before trying to delete their contents.
*/
public static boolean fullyDeleteContents(final File dir, final boolean tryGrantPermissions) {
if (tryGrantPermissions) {
// to be able to list the dir and delete files from it
// we must grant the dir rwx permissions:
grantPermissions(dir);
}
boolean deletionSucceeded = true;
final File[] contents = dir.listFiles();
if (contents != null) {
for (int i = 0; i < contents.length; i++) {
if (contents[i].isFile()) {
if (!deleteImpl(contents[i], true)) {// normal file or symlink to another file
deletionSucceeded = false;
continue; // continue deletion of other files/dirs under dir
}
} else {
// Either directory or symlink to another directory.
// Try deleting the directory as this might be a symlink
          boolean b = deleteImpl(contents[i], false);
if (b){
//this was indeed a symlink or an empty directory
continue;
}
// if not an empty directory or symlink let
// fullydelete handle it.
if (!fullyDelete(contents[i], tryGrantPermissions)) {
deletionSucceeded = false;
// continue deletion of other files/dirs under dir
}
}
}
}
return deletionSucceeded;
}
/**
* Recursively delete a directory.
*
* @param fs {@link FileSystem} on which the path is present
* @param dir directory to recursively delete
* @throws IOException
* @deprecated Use {@link FileSystem#delete(Path, boolean)}
*/
@Deprecated
public static void fullyDelete(FileSystem fs, Path dir)
throws IOException {
fs.delete(dir, true);
}
//
// If the destination is a subdirectory of the source, then
// generate exception
//
private static void checkDependencies(FileSystem srcFS,
Path src,
FileSystem dstFS,
Path dst)
throws IOException {
if (srcFS == dstFS) {
String srcq = src.makeQualified(srcFS).toString() + Path.SEPARATOR;
String dstq = dst.makeQualified(dstFS).toString() + Path.SEPARATOR;
if (dstq.startsWith(srcq)) {
if (srcq.length() == dstq.length()) {
throw new IOException("Cannot copy " + src + " to itself.");
} else {
throw new IOException("Cannot copy " + src + " to its subdirectory " +
dst);
}
}
}
}
/** Copy files between FileSystems. */
public static boolean copy(FileSystem srcFS, Path src,
FileSystem dstFS, Path dst,
boolean deleteSource,
Configuration conf) throws IOException {
return copy(srcFS, src, dstFS, dst, deleteSource, true, conf);
}
public static boolean copy(FileSystem srcFS, Path[] srcs,
FileSystem dstFS, Path dst,
boolean deleteSource,
boolean overwrite, Configuration conf)
throws IOException {
boolean gotException = false;
boolean returnVal = true;
StringBuilder exceptions = new StringBuilder();
if (srcs.length == 1)
return copy(srcFS, srcs[0], dstFS, dst, deleteSource, overwrite, conf);
// Check if dest is directory
if (!dstFS.exists(dst)) {
throw new IOException("`" + dst +"': specified destination directory " +
"does not exist");
} else {
FileStatus sdst = dstFS.getFileStatus(dst);
if (!sdst.isDirectory())
throw new IOException("copying multiple files, but last argument `" +
dst + "' is not a directory");
}
for (Path src : srcs) {
try {
if (!copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf))
returnVal = false;
} catch (IOException e) {
gotException = true;
exceptions.append(e.getMessage());
exceptions.append("\n");
}
}
if (gotException) {
throw new IOException(exceptions.toString());
}
return returnVal;
}
/** Copy files between FileSystems. */
public static boolean copy(FileSystem srcFS, Path src,
FileSystem dstFS, Path dst,
boolean deleteSource,
boolean overwrite,
Configuration conf) throws IOException {
FileStatus fileStatus = srcFS.getFileStatus(src);
return copy(srcFS, fileStatus, dstFS, dst, deleteSource, overwrite, conf);
}
/** Copy files between FileSystems. */
public static boolean copy(FileSystem srcFS, FileStatus srcStatus,
FileSystem dstFS, Path dst,
boolean deleteSource,
boolean overwrite,
Configuration conf) throws IOException {
Path src = srcStatus.getPath();
dst = checkDest(src.getName(), dstFS, dst, overwrite);
if (srcStatus.isDirectory()) {
checkDependencies(srcFS, src, dstFS, dst);
if (!dstFS.mkdirs(dst)) {
return false;
}
FileStatus contents[] = srcFS.listStatus(src);
for (int i = 0; i < contents.length; i++) {
copy(srcFS, contents[i], dstFS,
new Path(dst, contents[i].getPath().getName()),
deleteSource, overwrite, conf);
}
} else {
InputStream in=null;
OutputStream out = null;
try {
in = srcFS.open(src);
out = dstFS.create(dst, overwrite);
IOUtils.copyBytes(in, out, conf, true);
} catch (IOException e) {
IOUtils.closeStream(out);
IOUtils.closeStream(in);
throw e;
}
}
if (deleteSource) {
return srcFS.delete(src, true);
} else {
return true;
}
}
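  // Usage sketch (illustrative; the URI and paths are assumptions):
  //
  //   FileSystem local = FileSystem.getLocal(conf);
  //   FileSystem hdfs = FileSystem.get(URI.create("hdfs://nn:8020/"), conf);
  //   boolean ok = FileUtil.copy(local, new Path("/tmp/in.txt"),
  //       hdfs, new Path("/user/alice/in.txt"),
  //       false,  // deleteSource
  //       true,   // overwrite
  //       conf);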
/** Copy all files in a directory to one output file (merge). */
public static boolean copyMerge(FileSystem srcFS, Path srcDir,
FileSystem dstFS, Path dstFile,
boolean deleteSource,
Configuration conf, String addString) throws IOException {
dstFile = checkDest(srcDir.getName(), dstFS, dstFile, false);
if (!srcFS.getFileStatus(srcDir).isDirectory())
return false;
OutputStream out = dstFS.create(dstFile);
try {
FileStatus contents[] = srcFS.listStatus(srcDir);
Arrays.sort(contents);
for (int i = 0; i < contents.length; i++) {
if (contents[i].isFile()) {
InputStream in = srcFS.open(contents[i].getPath());
try {
IOUtils.copyBytes(in, out, conf, false);
if (addString!=null)
out.write(addString.getBytes("UTF-8"));
} finally {
in.close();
}
}
}
} finally {
out.close();
}
if (deleteSource) {
return srcFS.delete(srcDir, true);
} else {
return true;
}
}
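  // Usage sketch (illustrative): merge the part files of a job output
  // directory into a single file, separating them with a newline:
  //
  //   FileUtil.copyMerge(fs, new Path("/out"), fs, new Path("/out.merged"),
  //       false, conf, "\n");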
/** Copy local files to a FileSystem. */
public static boolean copy(File src,
FileSystem dstFS, Path dst,
boolean deleteSource,
Configuration conf) throws IOException {
dst = checkDest(src.getName(), dstFS, dst, false);
if (src.isDirectory()) {
if (!dstFS.mkdirs(dst)) {
return false;
}
File contents[] = listFiles(src);
for (int i = 0; i < contents.length; i++) {
copy(contents[i], dstFS, new Path(dst, contents[i].getName()),
deleteSource, conf);
}
} else if (src.isFile()) {
InputStream in = null;
OutputStream out =null;
try {
in = new FileInputStream(src);
out = dstFS.create(dst);
IOUtils.copyBytes(in, out, conf);
} catch (IOException e) {
IOUtils.closeStream( out );
IOUtils.closeStream( in );
throw e;
}
} else {
throw new IOException(src.toString() +
": No such file or directory");
}
if (deleteSource) {
return FileUtil.fullyDelete(src);
} else {
return true;
}
}
/** Copy FileSystem files to local files. */
public static boolean copy(FileSystem srcFS, Path src,
File dst, boolean deleteSource,
Configuration conf) throws IOException {
FileStatus filestatus = srcFS.getFileStatus(src);
return copy(srcFS, filestatus, dst, deleteSource, conf);
}
/** Copy FileSystem files to local files. */
private static boolean copy(FileSystem srcFS, FileStatus srcStatus,
File dst, boolean deleteSource,
Configuration conf) throws IOException {
Path src = srcStatus.getPath();
if (srcStatus.isDirectory()) {
if (!dst.mkdirs()) {
return false;
}
FileStatus contents[] = srcFS.listStatus(src);
for (int i = 0; i < contents.length; i++) {
copy(srcFS, contents[i],
new File(dst, contents[i].getPath().getName()),
deleteSource, conf);
}
} else {
InputStream in = srcFS.open(src);
IOUtils.copyBytes(in, new FileOutputStream(dst), conf);
}
if (deleteSource) {
return srcFS.delete(src, true);
} else {
return true;
}
}
private static Path checkDest(String srcName, FileSystem dstFS, Path dst,
boolean overwrite) throws IOException {
if (dstFS.exists(dst)) {
FileStatus sdst = dstFS.getFileStatus(dst);
if (sdst.isDirectory()) {
if (null == srcName) {
throw new IOException("Target " + dst + " is a directory");
}
return checkDest(null, dstFS, new Path(dst, srcName), overwrite);
} else if (!overwrite) {
throw new IOException("Target " + dst + " already exists");
}
}
return dst;
}
/**
   * Convert an OS-native filename to a path that works for the shell.
* @param filename The filename to convert
* @return The unix pathname
   * @throws IOException on Windows, there can be problems with the subprocess
*/
public static String makeShellPath(String filename) throws IOException {
return filename;
}
/**
   * Convert an OS-native filename to a path that works for the shell.
* @param file The filename to convert
* @return The unix pathname
   * @throws IOException on Windows, there can be problems with the subprocess
*/
public static String makeShellPath(File file) throws IOException {
return makeShellPath(file, false);
}
/**
   * Convert an OS-native filename to a path that works for the shell.
* @param file The filename to convert
* @param makeCanonicalPath
* Whether to make canonical path for the file passed
* @return The unix pathname
   * @throws IOException on Windows, there can be problems with the subprocess
*/
public static String makeShellPath(File file, boolean makeCanonicalPath)
throws IOException {
if (makeCanonicalPath) {
return makeShellPath(file.getCanonicalPath());
} else {
return makeShellPath(file.toString());
}
}
/**
   * Takes an input dir and returns the disk usage (du) of that local
   * directory. Very basic implementation.
*
* @param dir
* The input dir to get the disk space of this local dir
* @return The total disk space of the input local directory
*/
public static long getDU(File dir) {
long size = 0;
if (!dir.exists())
return 0;
if (!dir.isDirectory()) {
return dir.length();
} else {
File[] allFiles = dir.listFiles();
if(allFiles != null) {
for (int i = 0; i < allFiles.length; i++) {
boolean isSymLink;
try {
isSymLink = org.apache.commons.io.FileUtils.isSymlink(allFiles[i]);
} catch(IOException ioe) {
isSymLink = true;
}
if(!isSymLink) {
size += getDU(allFiles[i]);
}
}
}
return size;
}
}
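  // Usage sketch (illustrative; the directory is an assumption):
  //
  //   long usedBytes = FileUtil.getDU(new File("/var/cache/hadoop"));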
/**
   * Given a File input it will unzip the file into the unzip directory
   * passed as the second parameter.
* @param inFile The zip file as input
* @param unzipDir The unzip directory where to unzip the zip file.
* @throws IOException
*/
public static void unZip(File inFile, File unzipDir) throws IOException {
Enumeration<? extends ZipEntry> entries;
ZipFile zipFile = new ZipFile(inFile);
try {
entries = zipFile.entries();
while (entries.hasMoreElements()) {
ZipEntry entry = entries.nextElement();
if (!entry.isDirectory()) {
InputStream in = zipFile.getInputStream(entry);
try {
File file = new File(unzipDir, entry.getName());
if (!file.getParentFile().mkdirs()) {
if (!file.getParentFile().isDirectory()) {
throw new IOException("Mkdirs failed to create " +
file.getParentFile().toString());
}
}
OutputStream out = new FileOutputStream(file);
try {
byte[] buffer = new byte[8192];
int i;
while ((i = in.read(buffer)) != -1) {
out.write(buffer, 0, i);
}
} finally {
out.close();
}
} finally {
in.close();
}
}
}
} finally {
zipFile.close();
}
}
/**
   * Given a Tar File as input it will untar the file into the untar directory
   * passed as the second parameter.
   *
   * This utility will untar ".tar" files as well as ".tar.gz" and ".tgz" files.
*
* @param inFile The tar file as input.
* @param untarDir The untar directory where to untar the tar file.
* @throws IOException
*/
public static void unTar(File inFile, File untarDir) throws IOException {
if (!untarDir.mkdirs()) {
if (!untarDir.isDirectory()) {
throw new IOException("Mkdirs failed to create " + untarDir);
}
}
boolean gzipped = inFile.toString().endsWith("gz");
if(Shell.WINDOWS) {
// Tar is not native to Windows. Use simple Java based implementation for
// tests and simple tar archives
unTarUsingJava(inFile, untarDir, gzipped);
}
else {
// spawn tar utility to untar archive for full fledged unix behavior such
// as resolving symlinks in tar archives
unTarUsingTar(inFile, untarDir, gzipped);
}
}
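  // Usage sketch (illustrative; paths are assumptions). On Windows the
  // pure-Java path below is taken; elsewhere the tar utility is spawned so
  // symlinks inside the archive are resolved:
  //
  //   FileUtil.unTar(new File("/tmp/archive.tar.gz"), new File("/tmp/out"));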
private static void unTarUsingTar(File inFile, File untarDir,
boolean gzipped) throws IOException {
    StringBuilder untarCommand = new StringBuilder();
if (gzipped) {
untarCommand.append(" gzip -dc '");
untarCommand.append(FileUtil.makeShellPath(inFile));
untarCommand.append("' | (");
}
untarCommand.append("cd '");
untarCommand.append(FileUtil.makeShellPath(untarDir));
untarCommand.append("' ; ");
untarCommand.append("tar -xf ");
if (gzipped) {
untarCommand.append(" -)");
} else {
untarCommand.append(FileUtil.makeShellPath(inFile));
}
String[] shellCmd = { "bash", "-c", untarCommand.toString() };
ShellCommandExecutor shexec = new ShellCommandExecutor(shellCmd);
shexec.execute();
int exitcode = shexec.getExitCode();
if (exitcode != 0) {
throw new IOException("Error untarring file " + inFile +
". Tar process exited with exit code " + exitcode);
}
}
private static void unTarUsingJava(File inFile, File untarDir,
boolean gzipped) throws IOException {
InputStream inputStream = null;
TarArchiveInputStream tis = null;
try {
if (gzipped) {
inputStream = new BufferedInputStream(new GZIPInputStream(
new FileInputStream(inFile)));
} else {
inputStream = new BufferedInputStream(new FileInputStream(inFile));
}
tis = new TarArchiveInputStream(inputStream);
for (TarArchiveEntry entry = tis.getNextTarEntry(); entry != null;) {
unpackEntries(tis, entry, untarDir);
entry = tis.getNextTarEntry();
}
} finally {
IOUtils.cleanup(LOG, tis, inputStream);
}
}
private static void unpackEntries(TarArchiveInputStream tis,
TarArchiveEntry entry, File outputDir) throws IOException {
if (entry.isDirectory()) {
File subDir = new File(outputDir, entry.getName());
if (!subDir.mkdirs() && !subDir.isDirectory()) {
throw new IOException("Mkdirs failed to create tar internal dir "
+ outputDir);
}
for (TarArchiveEntry e : entry.getDirectoryEntries()) {
unpackEntries(tis, e, subDir);
}
return;
}
File outputFile = new File(outputDir, entry.getName());
if (!outputFile.getParentFile().exists()) {
if (!outputFile.getParentFile().mkdirs()) {
throw new IOException("Mkdirs failed to create tar internal dir "
+ outputDir);
}
}
if (entry.isLink()) {
File src = new File(outputDir, entry.getLinkName());
HardLink.createHardLink(src, outputFile);
return;
}
int count;
byte data[] = new byte[2048];
BufferedOutputStream outputStream = new BufferedOutputStream(
new FileOutputStream(outputFile));
while ((count = tis.read(data)) != -1) {
outputStream.write(data, 0, count);
}
outputStream.flush();
outputStream.close();
}
/**
* Class for creating hardlinks.
   * Supports Unix and Windows XP.
* @deprecated Use {@link org.apache.hadoop.fs.HardLink}
*/
@Deprecated
public static class HardLink extends org.apache.hadoop.fs.HardLink {
// This is a stub to assist with coordinated change between
// COMMON and HDFS projects. It will be removed after the
// corresponding change is committed to HDFS.
}
/**
* Create a soft link between a src and destination
* only on a local disk. HDFS does not support this.
   * On Windows, when symlink creation fails due to a security
   * setting, we will log a warning. The return code in this
* case is 2.
*
* @param target the target for symlink
* @param linkname the symlink
* @return 0 on success
*/
public static int symLink(String target, String linkname) throws IOException{
// Run the input paths through Java's File so that they are converted to the
// native OS form
File targetFile = new File(
Path.getPathWithoutSchemeAndAuthority(new Path(target)).toString());
File linkFile = new File(
Path.getPathWithoutSchemeAndAuthority(new Path(linkname)).toString());
// If not on Java7+, copy a file instead of creating a symlink since
// Java6 has close to no support for symlinks on Windows. Specifically
// File#length and File#renameTo do not work as expected.
// (see HADOOP-9061 for additional details)
// We still create symlinks for directories, since the scenario in this
// case is different. The directory content could change in which
// case the symlink loses its purpose (for example task attempt log folder
// is symlinked under userlogs and userlogs are generated afterwards).
if (Shell.WINDOWS && !Shell.isJava7OrAbove() && targetFile.isFile()) {
try {
LOG.warn("FileUtil#symlink: On Windows+Java6, copying file instead " +
"of creating a symlink. Copying " + target + " -> " + linkname);
if (!linkFile.getParentFile().exists()) {
LOG.warn("Parent directory " + linkFile.getParent() +
" does not exist.");
return 1;
} else {
org.apache.commons.io.FileUtils.copyFile(targetFile, linkFile);
}
} catch (IOException ex) {
LOG.warn("FileUtil#symlink failed to copy the file with error: "
+ ex.getMessage());
// Exit with non-zero exit code
return 1;
}
return 0;
}
String[] cmd = Shell.getSymlinkCommand(
targetFile.toString(),
linkFile.toString());
ShellCommandExecutor shExec;
try {
if (Shell.WINDOWS &&
linkFile.getParentFile() != null &&
!new Path(target).isAbsolute()) {
// Relative links on Windows must be resolvable at the time of
// creation. To ensure this we run the shell command in the directory
// of the link.
//
shExec = new ShellCommandExecutor(cmd, linkFile.getParentFile());
} else {
shExec = new ShellCommandExecutor(cmd);
}
shExec.execute();
} catch (Shell.ExitCodeException ec) {
int returnVal = ec.getExitCode();
if (Shell.WINDOWS && returnVal == SYMLINK_NO_PRIVILEGE) {
LOG.warn("Fail to create symbolic links on Windows. "
+ "The default security settings in Windows disallow non-elevated "
+ "administrators and all non-administrators from creating symbolic links. "
+ "This behavior can be changed in the Local Security Policy management console");
} else if (returnVal != 0) {
LOG.warn("Command '" + StringUtils.join(" ", cmd) + "' failed "
+ returnVal + " with: " + ec.getMessage());
}
return returnVal;
} catch (IOException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Error while create symlink " + linkname + " to " + target
+ "." + " Exception: " + StringUtils.stringifyException(e));
}
throw e;
}
return shExec.getExitCode();
}
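  // Usage sketch (illustrative; both paths are assumptions):
  //
  //   int rc = FileUtil.symLink("/data/logs/attempt_01", "userlogs/attempt_01");
  //   if (rc != 0) {
  //     LOG.warn("symlink exited with code " + rc);
  //   }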
/**
* Change the permissions on a filename.
* @param filename the name of the file to change
* @param perm the permission string
* @return the exit code from the command
* @throws IOException
* @throws InterruptedException
*/
public static int chmod(String filename, String perm
) throws IOException, InterruptedException {
return chmod(filename, perm, false);
}
/**
* Change the permissions on a file / directory, recursively, if
* needed.
* @param filename name of the file whose permissions are to change
* @param perm permission string
* @param recursive true, if permissions should be changed recursively
* @return the exit code from the command.
* @throws IOException
*/
public static int chmod(String filename, String perm, boolean recursive)
throws IOException {
String [] cmd = Shell.getSetPermissionCommand(perm, recursive);
String[] args = new String[cmd.length + 1];
System.arraycopy(cmd, 0, args, 0, cmd.length);
args[cmd.length] = new File(filename).getPath();
ShellCommandExecutor shExec = new ShellCommandExecutor(args);
try {
shExec.execute();
}catch(IOException e) {
if(LOG.isDebugEnabled()) {
LOG.debug("Error while changing permission : " + filename
+" Exception: " + StringUtils.stringifyException(e));
}
}
return shExec.getExitCode();
}
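  // Usage sketch (illustrative; permission strings follow chmod(1) syntax):
  //
  //   FileUtil.chmod("/tmp/launcher.sh", "a+x");    // single file
  //   FileUtil.chmod("/tmp/app-dir", "700", true);  // recursive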
/**
* Set the ownership on a file / directory. User name and group name
* cannot both be null.
* @param file the file to change
* @param username the new user owner name
* @param groupname the new group owner name
* @throws IOException
*/
public static void setOwner(File file, String username,
String groupname) throws IOException {
if (username == null && groupname == null) {
throw new IOException("username == null && groupname == null");
}
String arg = (username == null ? "" : username)
+ (groupname == null ? "" : ":" + groupname);
String [] cmd = Shell.getSetOwnerCommand(arg);
execCommand(file, cmd);
}
/**
   * Platform independent implementation for {@link File#setReadable(boolean)}.
* File#setReadable does not work as expected on Windows.
* @param f input file
   * @param readable whether the file should be readable
* @return true on success, false otherwise
*/
public static boolean setReadable(File f, boolean readable) {
if (Shell.WINDOWS) {
try {
String permission = readable ? "u+r" : "u-r";
FileUtil.chmod(f.getCanonicalPath(), permission, false);
return true;
} catch (IOException ex) {
return false;
}
} else {
return f.setReadable(readable);
}
}
/**
   * Platform independent implementation for {@link File#setWritable(boolean)}.
* File#setWritable does not work as expected on Windows.
* @param f input file
   * @param writable whether the file should be writable
* @return true on success, false otherwise
*/
public static boolean setWritable(File f, boolean writable) {
if (Shell.WINDOWS) {
try {
String permission = writable ? "u+w" : "u-w";
FileUtil.chmod(f.getCanonicalPath(), permission, false);
return true;
} catch (IOException ex) {
return false;
}
} else {
return f.setWritable(writable);
}
}
/**
   * Platform independent implementation for {@link File#setExecutable(boolean)}.
* File#setExecutable does not work as expected on Windows.
* Note: revoking execute permission on folders does not have the same
* behavior on Windows as on Unix platforms. Creating, deleting or renaming
* a file within that folder will still succeed on Windows.
* @param f input file
   * @param executable whether the file should be executable
* @return true on success, false otherwise
*/
public static boolean setExecutable(File f, boolean executable) {
if (Shell.WINDOWS) {
try {
String permission = executable ? "u+x" : "u-x";
FileUtil.chmod(f.getCanonicalPath(), permission, false);
return true;
} catch (IOException ex) {
return false;
}
} else {
return f.setExecutable(executable);
}
}
/**
* Platform independent implementation for {@link File#canRead()}
* @param f input file
* @return On Unix, same as {@link File#canRead()}
* On Windows, true if process has read access on the path
*/
public static boolean canRead(File f) {
if (Shell.WINDOWS) {
try {
return NativeIO.Windows.access(f.getCanonicalPath(),
NativeIO.Windows.AccessRight.ACCESS_READ);
} catch (IOException e) {
return false;
}
} else {
return f.canRead();
}
}
/**
* Platform independent implementation for {@link File#canWrite()}
* @param f input file
* @return On Unix, same as {@link File#canWrite()}
* On Windows, true if process has write access on the path
*/
public static boolean canWrite(File f) {
if (Shell.WINDOWS) {
try {
return NativeIO.Windows.access(f.getCanonicalPath(),
NativeIO.Windows.AccessRight.ACCESS_WRITE);
} catch (IOException e) {
return false;
}
} else {
return f.canWrite();
}
}
/**
* Platform independent implementation for {@link File#canExecute()}
* @param f input file
* @return On Unix, same as {@link File#canExecute()}
* On Windows, true if process has execute access on the path
*/
public static boolean canExecute(File f) {
if (Shell.WINDOWS) {
try {
return NativeIO.Windows.access(f.getCanonicalPath(),
NativeIO.Windows.AccessRight.ACCESS_EXECUTE);
} catch (IOException e) {
return false;
}
} else {
return f.canExecute();
}
}
/**
* Set permissions to the required value. Uses the java primitives instead
* of forking if group == other.
* @param f the file to change
* @param permission the new permissions
* @throws IOException
*/
public static void setPermission(File f, FsPermission permission
) throws IOException {
FsAction user = permission.getUserAction();
FsAction group = permission.getGroupAction();
FsAction other = permission.getOtherAction();
// use the native/fork if the group/other permissions are different
// or if the native is available or on Windows
if (group != other || NativeIO.isAvailable() || Shell.WINDOWS) {
execSetPermission(f, permission);
return;
}
boolean rv = true;
// read perms
rv = f.setReadable(group.implies(FsAction.READ), false);
checkReturnValue(rv, f, permission);
if (group.implies(FsAction.READ) != user.implies(FsAction.READ)) {
rv = f.setReadable(user.implies(FsAction.READ), true);
checkReturnValue(rv, f, permission);
}
// write perms
rv = f.setWritable(group.implies(FsAction.WRITE), false);
checkReturnValue(rv, f, permission);
if (group.implies(FsAction.WRITE) != user.implies(FsAction.WRITE)) {
rv = f.setWritable(user.implies(FsAction.WRITE), true);
checkReturnValue(rv, f, permission);
}
// exec perms
rv = f.setExecutable(group.implies(FsAction.EXECUTE), false);
checkReturnValue(rv, f, permission);
if (group.implies(FsAction.EXECUTE) != user.implies(FsAction.EXECUTE)) {
rv = f.setExecutable(user.implies(FsAction.EXECUTE), true);
checkReturnValue(rv, f, permission);
}
}
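  // Usage sketch (illustrative): 0644 has equal group and other actions, so
  // on a non-Windows JVM without native IO this stays on the java.io.File
  // primitives rather than forking:
  //
  //   FileUtil.setPermission(new File("data.txt"),
  //       new FsPermission((short) 0644));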
private static void checkReturnValue(boolean rv, File p,
FsPermission permission
) throws IOException {
if (!rv) {
throw new IOException("Failed to set permissions of path: " + p +
" to " +
String.format("%04o", permission.toShort()));
}
}
private static void execSetPermission(File f,
FsPermission permission
) throws IOException {
if (NativeIO.isAvailable()) {
NativeIO.POSIX.chmod(f.getCanonicalPath(), permission.toShort());
} else {
execCommand(f, Shell.getSetPermissionCommand(
String.format("%04o", permission.toShort()), false));
}
}
static String execCommand(File f, String... cmd) throws IOException {
String[] args = new String[cmd.length + 1];
System.arraycopy(cmd, 0, args, 0, cmd.length);
args[cmd.length] = f.getCanonicalPath();
String output = Shell.execCommand(args);
return output;
}
/**
* Create a tmp file for a base file.
* @param basefile the base file of the tmp
* @param prefix file name prefix of tmp
* @param isDeleteOnExit if true, the tmp will be deleted when the VM exits
* @return a newly created tmp file
   * @exception IOException If a tmp file cannot be created
* @see java.io.File#createTempFile(String, String, File)
* @see java.io.File#deleteOnExit()
*/
public static final File createLocalTempFile(final File basefile,
final String prefix,
final boolean isDeleteOnExit)
throws IOException {
File tmp = File.createTempFile(prefix + basefile.getName(),
"", basefile.getParentFile());
if (isDeleteOnExit) {
tmp.deleteOnExit();
}
return tmp;
}
/**
* Move the src file to the name specified by target.
* @param src the source file
* @param target the target file
* @exception IOException If this operation fails
*/
public static void replaceFile(File src, File target) throws IOException {
    /* renameTo() has two limitations on the Windows platform.
     * src.renameTo(target) fails if
     * 1) target already exists, OR
     * 2) target is already open for reading/writing.
*/
if (!src.renameTo(target)) {
int retries = 5;
while (target.exists() && !target.delete() && retries-- >= 0) {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
throw new IOException("replaceFile interrupted.");
}
}
if (!src.renameTo(target)) {
throw new IOException("Unable to rename " + src +
" to " + target);
}
}
}
/**
   * A wrapper for {@link File#listFiles()}. This java.io API returns null
   * when a dir is not a directory or for any I/O error. Instead of having
   * a null check everywhere File#listFiles() is used, this utility API
   * works around the problem by throwing an IOException, which is
   * preferable in the majority of cases.
* @param dir directory for which listing should be performed
* @return list of files or empty list
* @exception IOException for invalid directory or for a bad disk.
*/
public static File[] listFiles(File dir) throws IOException {
File[] files = dir.listFiles();
if(files == null) {
throw new IOException("Invalid directory or I/O error occurred for dir: "
+ dir.toString());
}
return files;
}
/**
   * A wrapper for {@link File#list()}. This java.io API returns null
   * when a dir is not a directory or for any I/O error. Instead of having
   * a null check everywhere File#list() is used, this utility API
   * works around the problem by throwing an IOException, which is
   * preferable in the majority of cases.
* @param dir directory for which listing should be performed
* @return list of file names or empty string list
* @exception IOException for invalid directory or for a bad disk.
*/
public static String[] list(File dir) throws IOException {
String[] fileNames = dir.list();
if(fileNames == null) {
throw new IOException("Invalid directory or I/O error occurred for dir: "
+ dir.toString());
}
return fileNames;
}
public static String[] createJarWithClassPath(String inputClassPath, Path pwd,
Map<String, String> callerEnv) throws IOException {
return createJarWithClassPath(inputClassPath, pwd, pwd, callerEnv);
}
/**
* Create a jar file at the given path, containing a manifest with a classpath
* that references all specified entries.
*
* Some platforms may have an upper limit on command line length. For example,
* the maximum command line length on Windows is 8191 characters, but the
* length of the classpath may exceed this. To work around this limitation,
* use this method to create a small intermediate jar with a manifest that
* contains the full classpath. It returns the absolute path to the new jar,
* which the caller may set as the classpath for a new process.
*
* Environment variable evaluation is not supported within a jar manifest, so
* this method expands environment variables before inserting classpath entries
* to the manifest. The method parses environment variables according to
* platform-specific syntax (%VAR% on Windows, or $VAR otherwise). On Windows,
* environment variables are case-insensitive. For example, %VAR% and %var%
* evaluate to the same value.
*
* Specifying the classpath in a jar manifest does not support wildcards, so
* this method expands wildcards internally. Any classpath entry that ends
* with * is translated to all files at that path with extension .jar or .JAR.
*
* @param inputClassPath String input classpath to bundle into the jar manifest
* @param pwd Path to working directory to save jar
* @param targetDir path to where the jar execution will have its working dir
* @param callerEnv Map<String, String> caller's environment variables to use
* for expansion
* @return String[] with absolute path to new jar in position 0 and
* unexpanded wild card entry path in position 1
* @throws IOException if there is an I/O error while writing the jar file
*/
public static String[] createJarWithClassPath(String inputClassPath, Path pwd,
Path targetDir,
Map<String, String> callerEnv) throws IOException {
// Replace environment variables, case-insensitive on Windows
@SuppressWarnings("unchecked")
Map<String, String> env = Shell.WINDOWS ? new CaseInsensitiveMap(callerEnv) :
callerEnv;
String[] classPathEntries = inputClassPath.split(File.pathSeparator);
for (int i = 0; i < classPathEntries.length; ++i) {
classPathEntries[i] = StringUtils.replaceTokens(classPathEntries[i],
StringUtils.ENV_VAR_PATTERN, env);
}
File workingDir = new File(pwd.toString());
if (!workingDir.mkdirs()) {
// If mkdirs returns false because the working directory already exists,
// then this is acceptable. If it returns false due to some other I/O
// error, then this method will fail later with an IOException while saving
// the jar.
LOG.debug("mkdirs false for " + workingDir + ", execution will continue");
}
StringBuilder unexpandedWildcardClasspath = new StringBuilder();
// Append all entries
List<String> classPathEntryList = new ArrayList<String>(
classPathEntries.length);
for (String classPathEntry: classPathEntries) {
if (classPathEntry.length() == 0) {
continue;
}
if (classPathEntry.endsWith("*")) {
boolean foundWildCardJar = false;
// Append all jars that match the wildcard
Path globPath = new Path(classPathEntry).suffix("{.jar,.JAR}");
FileStatus[] wildcardJars = FileContext.getLocalFSFileContext().util()
.globStatus(globPath);
if (wildcardJars != null) {
for (FileStatus wildcardJar: wildcardJars) {
foundWildCardJar = true;
classPathEntryList.add(wildcardJar.getPath().toUri().toURL()
.toExternalForm());
}
}
if (!foundWildCardJar) {
unexpandedWildcardClasspath.append(File.pathSeparator);
unexpandedWildcardClasspath.append(classPathEntry);
}
} else {
// Append just this entry
File fileCpEntry = null;
if(!new Path(classPathEntry).isAbsolute()) {
fileCpEntry = new File(targetDir.toString(), classPathEntry);
}
else {
fileCpEntry = new File(classPathEntry);
}
String classPathEntryUrl = fileCpEntry.toURI().toURL()
.toExternalForm();
// File.toURI only appends trailing '/' if it can determine that it is a
// directory that already exists. (See JavaDocs.) If this entry had a
// trailing '/' specified by the caller, then guarantee that the
// classpath entry in the manifest has a trailing '/', and thus refers to
// a directory instead of a file. This can happen if the caller is
// creating a classpath jar referencing a directory that hasn't been
// created yet, but will definitely be created before running.
if (classPathEntry.endsWith(Path.SEPARATOR) &&
!classPathEntryUrl.endsWith(Path.SEPARATOR)) {
classPathEntryUrl = classPathEntryUrl + Path.SEPARATOR;
}
classPathEntryList.add(classPathEntryUrl);
}
}
String jarClassPath = StringUtils.join(" ", classPathEntryList);
// Create the manifest
Manifest jarManifest = new Manifest();
jarManifest.getMainAttributes().putValue(
Attributes.Name.MANIFEST_VERSION.toString(), "1.0");
jarManifest.getMainAttributes().putValue(
Attributes.Name.CLASS_PATH.toString(), jarClassPath);
// Write the manifest to output JAR file
File classPathJar = File.createTempFile("classpath-", ".jar", workingDir);
FileOutputStream fos = null;
BufferedOutputStream bos = null;
JarOutputStream jos = null;
try {
fos = new FileOutputStream(classPathJar);
bos = new BufferedOutputStream(fos);
jos = new JarOutputStream(bos, jarManifest);
} finally {
IOUtils.cleanup(LOG, jos, bos, fos);
}
String[] jarCp = {classPathJar.getCanonicalPath(),
unexpandedWildcardClasspath.toString()};
return jarCp;
}
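  // Usage sketch (illustrative; the working directory is an assumption):
  //
  //   String[] jarCp = FileUtil.createJarWithClassPath(
  //       System.getenv("CLASSPATH"), new Path("/tmp/work"), System.getenv());
  //   // jarCp[0]: manifest-only jar to place on the child's classpath
  //   // jarCp[1]: wildcard entries that matched no jars, still unexpanded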
public static boolean compareFs(FileSystem srcFs, FileSystem destFs) {
if (srcFs==null || destFs==null) {
return false;
}
URI srcUri = srcFs.getUri();
URI dstUri = destFs.getUri();
if (srcUri.getScheme()==null) {
return false;
}
if (!srcUri.getScheme().equals(dstUri.getScheme())) {
return false;
}
String srcHost = srcUri.getHost();
String dstHost = dstUri.getHost();
if ((srcHost!=null) && (dstHost!=null)) {
if (srcHost.equals(dstHost)) {
return srcUri.getPort()==dstUri.getPort();
}
try {
srcHost = InetAddress.getByName(srcHost).getCanonicalHostName();
dstHost = InetAddress.getByName(dstHost).getCanonicalHostName();
} catch (UnknownHostException ue) {
if (LOG.isDebugEnabled()) {
LOG.debug("Could not compare file-systems. Unknown host: ", ue);
}
return false;
}
if (!srcHost.equals(dstHost)) {
return false;
}
} else if (srcHost==null && dstHost!=null) {
return false;
} else if (srcHost!=null) {
return false;
}
// check for ports
return srcUri.getPort()==dstUri.getPort();
}
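  // Usage sketch (illustrative): choose between an in-place rename and a
  // copy-then-delete across filesystems:
  //
  //   if (FileUtil.compareFs(srcFs, dstFs)) {
  //     srcFs.rename(src, dst);
  //   } else {
  //     FileUtil.copy(srcFs, src, dstFs, dst, true, conf);
  //   }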
}
| 48,873 | 34.648432 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
/**
* Implementation of AbstractFileSystem based on the existing implementation of
* {@link FileSystem}.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public abstract class DelegateToFileSystem extends AbstractFileSystem {
protected final FileSystem fsImpl;
protected DelegateToFileSystem(URI theUri, FileSystem theFsImpl,
Configuration conf, String supportedScheme, boolean authorityRequired)
throws IOException, URISyntaxException {
super(theUri, supportedScheme, authorityRequired,
getDefaultPortIfDefined(theFsImpl));
fsImpl = theFsImpl;
fsImpl.initialize(theUri, conf);
fsImpl.statistics = getStatistics();
}
/**
* Returns the default port if the file system defines one.
* {@link FileSystem#getDefaultPort()} returns 0 to indicate the default port
* is undefined. However, the logic that consumes this value expects to
* receive -1 to indicate the port is undefined, which agrees with the
* contract of {@link URI#getPort()}.
*
* @param theFsImpl file system to check for default port
* @return default port, or -1 if default port is undefined
*/
private static int getDefaultPortIfDefined(FileSystem theFsImpl) {
int defaultPort = theFsImpl.getDefaultPort();
return defaultPort != 0 ? defaultPort : -1;
}
@Override
public Path getInitialWorkingDirectory() {
return fsImpl.getInitialWorkingDirectory();
}
@Override
@SuppressWarnings("deprecation") // call to primitiveCreate
public FSDataOutputStream createInternal (Path f,
EnumSet<CreateFlag> flag, FsPermission absolutePermission, int bufferSize,
short replication, long blockSize, Progressable progress,
ChecksumOpt checksumOpt, boolean createParent) throws IOException {
checkPath(f);
// Default impl assumes that permissions do not matter
// calling the regular create is good enough.
// FSs that implement permissions should override this.
if (!createParent) { // parent must exist.
// since this.create makes parent dirs automatically
// we must throw exception if parent does not exist.
final FileStatus stat = getFileStatus(f.getParent());
if (stat == null) {
throw new FileNotFoundException("Missing parent:" + f);
}
if (!stat.isDirectory()) {
throw new ParentNotDirectoryException("parent is not a dir:" + f);
}
// parent does exist - go ahead with create of file.
}
return fsImpl.primitiveCreate(f, absolutePermission, flag,
bufferSize, replication, blockSize, progress, checksumOpt);
}
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
checkPath(f);
return fsImpl.delete(f, recursive);
}
@Override
public BlockLocation[] getFileBlockLocations(Path f, long start, long len)
throws IOException {
checkPath(f);
return fsImpl.getFileBlockLocations(f, start, len);
}
@Override
public FileChecksum getFileChecksum(Path f) throws IOException {
checkPath(f);
return fsImpl.getFileChecksum(f);
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
checkPath(f);
return fsImpl.getFileStatus(f);
}
@Override
public FileStatus getFileLinkStatus(final Path f) throws IOException {
FileStatus status = fsImpl.getFileLinkStatus(f);
// FileSystem#getFileLinkStatus qualifies the link target
// AbstractFileSystem needs to return it plain since it's qualified
// in FileContext, so re-get and set the plain target
if (status.isSymlink()) {
status.setSymlink(fsImpl.getLinkTarget(f));
}
return status;
}
@Override
public FsStatus getFsStatus() throws IOException {
return fsImpl.getStatus();
}
@Override
public FsStatus getFsStatus(final Path f) throws IOException {
return fsImpl.getStatus(f);
}
@Override
public FsServerDefaults getServerDefaults() throws IOException {
return fsImpl.getServerDefaults();
}
@Override
public Path getHomeDirectory() {
return fsImpl.getHomeDirectory();
}
@Override
public int getUriDefaultPort() {
return 0;
}
@Override
public FileStatus[] listStatus(Path f) throws IOException {
checkPath(f);
return fsImpl.listStatus(f);
}
@Override
@SuppressWarnings("deprecation") // call to primitiveMkdir
public void mkdir(Path dir, FsPermission permission, boolean createParent)
throws IOException {
checkPath(dir);
fsImpl.primitiveMkdir(dir, permission, createParent);
}
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
checkPath(f);
return fsImpl.open(f, bufferSize);
}
@Override
public boolean truncate(Path f, long newLength) throws IOException {
checkPath(f);
return fsImpl.truncate(f, newLength);
}
@Override
@SuppressWarnings("deprecation") // call to rename
public void renameInternal(Path src, Path dst) throws IOException {
checkPath(src);
checkPath(dst);
fsImpl.rename(src, dst, Options.Rename.NONE);
}
@Override
public void setOwner(Path f, String username, String groupname)
throws IOException {
checkPath(f);
fsImpl.setOwner(f, username, groupname);
}
@Override
public void setPermission(Path f, FsPermission permission)
throws IOException {
checkPath(f);
fsImpl.setPermission(f, permission);
}
@Override
public boolean setReplication(Path f, short replication)
throws IOException {
checkPath(f);
return fsImpl.setReplication(f, replication);
}
@Override
public void setTimes(Path f, long mtime, long atime) throws IOException {
checkPath(f);
fsImpl.setTimes(f, mtime, atime);
}
@Override
public void setVerifyChecksum(boolean verifyChecksum) throws IOException {
fsImpl.setVerifyChecksum(verifyChecksum);
}
@Override
public boolean supportsSymlinks() {
return fsImpl.supportsSymlinks();
}
@Override
public void createSymlink(Path target, Path link, boolean createParent)
throws IOException {
fsImpl.createSymlink(target, link, createParent);
}
@Override
public Path getLinkTarget(final Path f) throws IOException {
return fsImpl.getLinkTarget(f);
}
@Override //AbstractFileSystem
public String getCanonicalServiceName() {
return fsImpl.getCanonicalServiceName();
}
@Override //AbstractFileSystem
public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
return Arrays.asList(fsImpl.addDelegationTokens(renewer, null));
}
}
| 7,986 | 29.957364 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobExpander.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Private
@InterfaceStability.Unstable
class GlobExpander {
static class StringWithOffset {
String string;
int offset;
public StringWithOffset(String string, int offset) {
super();
this.string = string;
this.offset = offset;
}
}
/**
* Expand globs in the given <code>filePattern</code> into a collection of
* file patterns so that in the expanded set no file pattern has a
* slash character ("/") in a curly bracket pair.
* @param filePattern
* @return expanded file patterns
* @throws IOException
*/
public static List<String> expand(String filePattern) throws IOException {
List<String> fullyExpanded = new ArrayList<String>();
List<StringWithOffset> toExpand = new ArrayList<StringWithOffset>();
toExpand.add(new StringWithOffset(filePattern, 0));
while (!toExpand.isEmpty()) {
StringWithOffset path = toExpand.remove(0);
List<StringWithOffset> expanded = expandLeftmost(path);
if (expanded == null) {
fullyExpanded.add(path.string);
} else {
toExpand.addAll(0, expanded);
}
}
return fullyExpanded;
}
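  // Illustrative expansions (hedged): only curly pairs that contain a slash
  // are rewritten here; slash-free pairs are left for the glob matcher:
  //
  //   expand("{/a,/b}/c")  ->  ["/a/c", "/b/c"]
  //   expand("{a,b}/c")    ->  ["{a,b}/c"]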
/**
* Expand the leftmost outer curly bracket pair containing a
* slash character ("/") in <code>filePattern</code>.
   * @param filePatternWithOffset
* @return expanded file patterns
* @throws IOException
*/
private static List<StringWithOffset> expandLeftmost(StringWithOffset
filePatternWithOffset) throws IOException {
String filePattern = filePatternWithOffset.string;
int leftmost = leftmostOuterCurlyContainingSlash(filePattern,
filePatternWithOffset.offset);
if (leftmost == -1) {
return null;
}
int curlyOpen = 0;
StringBuilder prefix = new StringBuilder(filePattern.substring(0, leftmost));
StringBuilder suffix = new StringBuilder();
List<String> alts = new ArrayList<String>();
StringBuilder alt = new StringBuilder();
StringBuilder cur = prefix;
for (int i = leftmost; i < filePattern.length(); i++) {
char c = filePattern.charAt(i);
if (cur == suffix) {
cur.append(c);
} else if (c == '\\') {
i++;
if (i >= filePattern.length()) {
throw new IOException("Illegal file pattern: "
+ "An escaped character does not present for glob "
+ filePattern + " at " + i);
}
c = filePattern.charAt(i);
cur.append(c);
} else if (c == '{') {
if (curlyOpen++ == 0) {
alt.setLength(0);
cur = alt;
} else {
cur.append(c);
}
} else if (c == '}' && curlyOpen > 0) {
if (--curlyOpen == 0) {
alts.add(alt.toString());
alt.setLength(0);
cur = suffix;
} else {
cur.append(c);
}
} else if (c == ',') {
if (curlyOpen == 1) {
alts.add(alt.toString());
alt.setLength(0);
} else {
cur.append(c);
}
} else {
cur.append(c);
}
}
List<StringWithOffset> exp = new ArrayList<StringWithOffset>();
for (String string : alts) {
exp.add(new StringWithOffset(prefix + string + suffix, prefix.length()));
}
return exp;
}
/**
* Finds the index of the leftmost opening curly bracket containing a
* slash character ("/") in <code>filePattern</code>.
* @param filePattern
* @return the index of the leftmost opening curly bracket containing a
* slash character ("/"), or -1 if there is no such bracket
* @throws IOException
*/
private static int leftmostOuterCurlyContainingSlash(String filePattern,
int offset) throws IOException {
int curlyOpen = 0;
int leftmost = -1;
boolean seenSlash = false;
for (int i = offset; i < filePattern.length(); i++) {
char c = filePattern.charAt(i);
if (c == '\\') {
i++;
if (i >= filePattern.length()) {
throw new IOException("Illegal file pattern: "
+ "An escaped character does not present for glob "
+ filePattern + " at " + i);
}
} else if (c == '{') {
if (curlyOpen++ == 0) {
leftmost = i;
}
} else if (c == '}' && curlyOpen > 0) {
if (--curlyOpen == 0 && leftmost != -1 && seenSlash) {
return leftmost;
}
} else if (c == '/' && curlyOpen > 0) {
seenSlash = true;
}
}
return -1;
}
}
| 5,538 | 31.203488 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
public class HarFs extends DelegateToFileSystem {
HarFs(final URI theUri, final Configuration conf)
throws IOException, URISyntaxException {
super(theUri, new HarFileSystem(), conf, "har", false);
}
@Override
public int getUriDefaultPort() {
return -1;
}
}
| 1,231 | 30.589744 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
/**
* This class contains options related to file system operations.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final class Options {
/**
* Class to support the varargs for create() options.
*
*/
public static class CreateOpts {
private CreateOpts() { };
public static BlockSize blockSize(long bs) {
return new BlockSize(bs);
}
public static BufferSize bufferSize(int bs) {
return new BufferSize(bs);
}
public static ReplicationFactor repFac(short rf) {
return new ReplicationFactor(rf);
}
public static BytesPerChecksum bytesPerChecksum(short crc) {
return new BytesPerChecksum(crc);
}
public static ChecksumParam checksumParam(
ChecksumOpt csumOpt) {
return new ChecksumParam(csumOpt);
}
public static Perms perms(FsPermission perm) {
return new Perms(perm);
}
public static CreateParent createParent() {
return new CreateParent(true);
}
public static CreateParent donotCreateParent() {
return new CreateParent(false);
}
public static class BlockSize extends CreateOpts {
private final long blockSize;
protected BlockSize(long bs) {
if (bs <= 0) {
throw new IllegalArgumentException(
"Block size must be greater than 0");
}
blockSize = bs;
}
public long getValue() { return blockSize; }
}
public static class ReplicationFactor extends CreateOpts {
private final short replication;
protected ReplicationFactor(short rf) {
if (rf <= 0) {
throw new IllegalArgumentException(
"Replication must be greater than 0");
}
replication = rf;
}
public short getValue() { return replication; }
}
public static class BufferSize extends CreateOpts {
private final int bufferSize;
protected BufferSize(int bs) {
if (bs <= 0) {
throw new IllegalArgumentException(
"Buffer size must be greater than 0");
}
bufferSize = bs;
}
public int getValue() { return bufferSize; }
}
    /** This is not needed if ChecksumParam is specified. */
public static class BytesPerChecksum extends CreateOpts {
private final int bytesPerChecksum;
protected BytesPerChecksum(short bpc) {
if (bpc <= 0) {
throw new IllegalArgumentException(
"Bytes per checksum must be greater than 0");
}
bytesPerChecksum = bpc;
}
public int getValue() { return bytesPerChecksum; }
}
public static class ChecksumParam extends CreateOpts {
private final ChecksumOpt checksumOpt;
protected ChecksumParam(ChecksumOpt csumOpt) {
checksumOpt = csumOpt;
}
public ChecksumOpt getValue() { return checksumOpt; }
}
public static class Perms extends CreateOpts {
private final FsPermission permissions;
protected Perms(FsPermission perm) {
if(perm == null) {
throw new IllegalArgumentException("Permissions must not be null");
}
permissions = perm;
}
public FsPermission getValue() { return permissions; }
}
public static class Progress extends CreateOpts {
private final Progressable progress;
protected Progress(Progressable prog) {
if(prog == null) {
throw new IllegalArgumentException("Progress must not be null");
}
progress = prog;
}
public Progressable getValue() { return progress; }
}
public static class CreateParent extends CreateOpts {
private final boolean createParent;
protected CreateParent(boolean createPar) {
createParent = createPar;}
public boolean getValue() { return createParent; }
}
    /**
     * Get an option of the desired type from the varargs.
     * @param clazz the desired class of the opt
     * @param opts not null; at least one opt must be passed
     * @return the opt among the given opts whose class is clazz,
     *     or null if there isn't any
     */
static <T extends CreateOpts> T getOpt(Class<T> clazz, CreateOpts... opts) {
if (opts == null) {
throw new IllegalArgumentException("Null opt");
}
T result = null;
for (int i = 0; i < opts.length; ++i) {
if (opts[i].getClass() == clazz) {
if (result != null) {
throw new IllegalArgumentException("multiple opts varargs: " + clazz);
}
@SuppressWarnings("unchecked")
T t = (T)opts[i];
result = t;
}
}
return result;
}
    /**
     * Set an option, replacing any existing opt of the same class.
     * @param newValue the option to be set
     * @param opts the array of opts into which the option is set
     * @return updated CreateOpts[] == opts + newValue
     */
static <T extends CreateOpts> CreateOpts[] setOpt(final T newValue,
final CreateOpts... opts) {
final Class<?> clazz = newValue.getClass();
boolean alreadyInOpts = false;
if (opts != null) {
for (int i = 0; i < opts.length; ++i) {
if (opts[i].getClass() == clazz) {
if (alreadyInOpts) {
throw new IllegalArgumentException("multiple opts varargs: " + clazz);
}
alreadyInOpts = true;
opts[i] = newValue;
}
}
}
CreateOpts[] resultOpt = opts;
if (!alreadyInOpts) { // no newValue in opt
final int oldLength = opts == null? 0: opts.length;
CreateOpts[] newOpts = new CreateOpts[oldLength + 1];
if (oldLength > 0) {
System.arraycopy(opts, 0, newOpts, 0, oldLength);
}
newOpts[oldLength] = newValue;
resultOpt = newOpts;
}
return resultOpt;
}
}
/**
* Enum to support the varargs for rename() options
*/
public static enum Rename {
NONE((byte) 0), // No options
OVERWRITE((byte) 1); // Overwrite the rename destination
private final byte code;
private Rename(byte code) {
this.code = code;
}
public static Rename valueOf(byte code) {
return code < 0 || code >= values().length ? null : values()[code];
}
public byte value() {
return code;
}
}
/**
* This is used in FileSystem and FileContext to specify checksum options.
*/
public static class ChecksumOpt {
private final DataChecksum.Type checksumType;
private final int bytesPerChecksum;
    /**
     * Create an uninitialized one.
     */
public ChecksumOpt() {
this(DataChecksum.Type.DEFAULT, -1);
}
/**
     * Normal constructor.
* @param type checksum type
* @param size bytes per checksum
*/
public ChecksumOpt(DataChecksum.Type type, int size) {
checksumType = type;
bytesPerChecksum = size;
}
public int getBytesPerChecksum() {
return bytesPerChecksum;
}
public DataChecksum.Type getChecksumType() {
return checksumType;
}
@Override
public String toString() {
return checksumType + ":" + bytesPerChecksum;
}
    /**
     * Create a ChecksumOpt that disables checksums.
     */
public static ChecksumOpt createDisabled() {
return new ChecksumOpt(DataChecksum.Type.NULL, -1);
}
/**
* A helper method for processing user input and default value to
* create a combined checksum option. This is a bit complicated because
* bytesPerChecksum is kept for backward compatibility.
*
* @param defaultOpt Default checksum option
* @param userOpt User-specified checksum option. Ignored if null.
     * @param userBytesPerChecksum User-specified bytesPerChecksum;
     *                             ignored unless it is positive
*/
public static ChecksumOpt processChecksumOpt(ChecksumOpt defaultOpt,
ChecksumOpt userOpt, int userBytesPerChecksum) {
final boolean useDefaultType;
final DataChecksum.Type type;
if (userOpt != null
&& userOpt.getChecksumType() != DataChecksum.Type.DEFAULT) {
useDefaultType = false;
type = userOpt.getChecksumType();
} else {
useDefaultType = true;
type = defaultOpt.getChecksumType();
}
// bytesPerChecksum - order of preference
// user specified value in bytesPerChecksum
// user specified value in checksumOpt
// default.
if (userBytesPerChecksum > 0) {
return new ChecksumOpt(type, userBytesPerChecksum);
} else if (userOpt != null && userOpt.getBytesPerChecksum() > 0) {
return !useDefaultType? userOpt
: new ChecksumOpt(type, userOpt.getBytesPerChecksum());
} else {
return useDefaultType? defaultOpt
: new ChecksumOpt(type, defaultOpt.getBytesPerChecksum());
}
}
/**
* A helper method for processing user input and default value to
* create a combined checksum option.
*
* @param defaultOpt Default checksum option
* @param userOpt User-specified checksum option
*/
public static ChecksumOpt processChecksumOpt(ChecksumOpt defaultOpt,
ChecksumOpt userOpt) {
return processChecksumOpt(defaultOpt, userOpt, -1);
}
}
}
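// ---------------------------------------------------------------------------
// Editor's sketch (hedged addition, not part of the original file): shows how
// the CreateOpts varargs, setOpt()/getOpt(), and ChecksumOpt.processChecksumOpt
// above fit together. The class name and all sample values are hypothetical;
// the package-private helpers are reachable only because this sketch sits in
// the same package and compilation unit as Options.
class OptionsUsageSketch {
  public static void main(String[] args) {
    // Callers of FileContext#create pass options as varargs like these.
    Options.CreateOpts[] opts = {
        Options.CreateOpts.blockSize(128L * 1024 * 1024),
        Options.CreateOpts.repFac((short) 3),
        Options.CreateOpts.createParent()
    };
    // setOpt() replaces an opt of the same class, or appends a new one.
    opts = Options.CreateOpts.setOpt(
        Options.CreateOpts.bufferSize(8192), opts);
    Options.CreateOpts.BlockSize bs =
        Options.CreateOpts.getOpt(Options.CreateOpts.BlockSize.class, opts);
    System.out.println("blockSize=" + bs.getValue() + ", opts=" + opts.length);
    // processChecksumOpt() preference order: explicit bytesPerChecksum first,
    // then the user opt's value, then the default opt.
    Options.ChecksumOpt dflt =
        new Options.ChecksumOpt(DataChecksum.Type.CRC32C, 512);
    Options.ChecksumOpt combined =
        Options.ChecksumOpt.processChecksumOpt(dflt, null, 1024);
    System.out.println("combined=" + combined); // prints "CRC32C:1024"
  }
}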
| 10,429 | 30.79878 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasEnhancedByteBufferAccess.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.ByteBufferPool;
/**
* FSDataInputStreams implement this interface to provide enhanced
* byte buffer access. Usually this takes the form of mmap support.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface HasEnhancedByteBufferAccess {
/**
* Get a ByteBuffer containing file data.
*
* This ByteBuffer may come from the stream itself, via a call like mmap,
* or it may come from the ByteBufferFactory which is passed in as an
* argument.
*
* @param factory
* If this is non-null, it will be used to create a fallback
* ByteBuffer when the stream itself cannot create one.
* @param maxLength
* The maximum length of buffer to return. We may return a buffer
* which is shorter than this.
* @param opts
* Options to use when reading.
*
* @return
* We will always return an empty buffer if maxLength was 0,
* whether or not we are at EOF.
* If maxLength > 0, we will return null if the stream has
* reached EOF.
* Otherwise, we will return a ByteBuffer containing at least one
* byte. You must free this ByteBuffer when you are done with it
* by calling releaseBuffer on it. The buffer will continue to be
* readable until it is released in this manner. However, the
* input stream's close method may warn about unclosed buffers.
   * @throws IOException if there was an error reading.
   * @throws UnsupportedOperationException if factory was null, and we
   *     needed an external byte buffer. UnsupportedOperationException
   *     will never be thrown unless the factory argument is null.
   */
public ByteBuffer read(ByteBufferPool factory, int maxLength,
EnumSet<ReadOption> opts)
throws IOException, UnsupportedOperationException;
/**
* Release a ByteBuffer which was created by the enhanced ByteBuffer read
* function. You must not continue using the ByteBuffer after calling this
* function.
*
* @param buffer
* The ByteBuffer to release.
*/
public void releaseBuffer(ByteBuffer buffer);
}
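// ---------------------------------------------------------------------------
// Editor's sketch (hedged addition, not part of the original file): the call
// pattern the contract above implies. The class and method names are
// hypothetical; any FSDataInputStream whose wrapped stream implements
// HasEnhancedByteBufferAccess can be used this way.
class EnhancedReadSketch {
  static void drain(FSDataInputStream in) throws IOException {
    ByteBufferPool pool = new org.apache.hadoop.io.ElasticByteBufferPool();
    ByteBuffer buf;
    // With maxLength > 0, a null return signals EOF.
    while ((buf = in.read(pool, 1 << 20,
        EnumSet.noneOf(ReadOption.class))) != null) {
      try {
        // ... consume buf.remaining() bytes here ...
      } finally {
        in.releaseBuffer(buf); // every buffer must be released exactly once
      }
    }
  }
}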
| 3,322 | 39.52439 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/****************************************************************
* CreateFlag specifies the file create semantic. Users can combine flags like: <br>
* <code>
* EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND)
 * </code>
* <p>
*
* Use the CreateFlag as follows:
* <ol>
* <li> CREATE - to create a file if it does not exist,
* else throw FileAlreadyExists.</li>
* <li> APPEND - to append to a file if it exists,
* else throw FileNotFoundException.</li>
* <li> OVERWRITE - to truncate a file if it exists,
* else throw FileNotFoundException.</li>
* <li> CREATE|APPEND - to create a file if it does not exist,
* else append to an existing file.</li>
* <li> CREATE|OVERWRITE - to create a file if it does not exist,
* else overwrite an existing file.</li>
* <li> SYNC_BLOCK - to force closed blocks to the disk device.
* In addition {@link Syncable#hsync()} should be called after each write,
* if true synchronous behavior is required.</li>
* <li> LAZY_PERSIST - Create the block on transient storage (RAM) if
* available.</li>
* <li> APPEND_NEWBLOCK - Append data to a new block instead of end of the last
* partial block.</li>
* </ol>
*
* Following combination is not valid and will result in
* {@link HadoopIllegalArgumentException}:
* <ol>
* <li> APPEND|OVERWRITE</li>
* <li> CREATE|APPEND|OVERWRITE</li>
* </ol>
*****************************************************************/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum CreateFlag {
  /**
   * Create a file. See the class javadoc above for the behavior when the
   * file already exists.
   */
CREATE((short) 0x01),
/**
* Truncate/overwrite a file. Same as POSIX O_TRUNC. See javadoc for description.
*/
OVERWRITE((short) 0x02),
/**
* Append to a file. See javadoc for more description.
*/
APPEND((short) 0x04),
/**
* Force closed blocks to disk. Similar to POSIX O_SYNC. See javadoc for description.
*/
SYNC_BLOCK((short) 0x08),
/**
* Create the block on transient storage (RAM) if available. If
* transient storage is unavailable then the block will be created
* on disk.
*
* HDFS will make a best effort to lazily write these files to persistent
* storage, however file contents may be lost at any time due to process/
* node restarts, hence there is no guarantee of data durability.
*
* This flag must only be used for intermediate data whose loss can be
* tolerated by the application.
*/
LAZY_PERSIST((short) 0x10),
/**
* Append data to a new block instead of the end of the last partial block.
* This is only useful for APPEND.
*/
NEW_BLOCK((short) 0x20);
private final short mode;
private CreateFlag(short mode) {
this.mode = mode;
}
short getMode() {
return mode;
}
/**
* Validate the CreateFlag and throw exception if it is invalid
* @param flag set of CreateFlag
* @throws HadoopIllegalArgumentException if the CreateFlag is invalid
*/
public static void validate(EnumSet<CreateFlag> flag) {
if (flag == null || flag.isEmpty()) {
throw new HadoopIllegalArgumentException(flag
+ " does not specify any options");
}
final boolean append = flag.contains(APPEND);
final boolean overwrite = flag.contains(OVERWRITE);
    // Specifying both append and overwrite is an error
if (append && overwrite) {
      throw new HadoopIllegalArgumentException(
          flag + ": both append and overwrite options cannot be enabled.");
}
}
/**
* Validate the CreateFlag for create operation
* @param path Object representing the path; usually String or {@link Path}
* @param pathExists pass true if the path exists in the file system
* @param flag set of CreateFlag
* @throws IOException on error
* @throws HadoopIllegalArgumentException if the CreateFlag is invalid
*/
public static void validate(Object path, boolean pathExists,
EnumSet<CreateFlag> flag) throws IOException {
validate(flag);
final boolean append = flag.contains(APPEND);
final boolean overwrite = flag.contains(OVERWRITE);
if (pathExists) {
if (!(append || overwrite)) {
throw new FileAlreadyExistsException("File already exists: "
+ path.toString()
+ ". Append or overwrite option must be specified in " + flag);
}
} else if (!flag.contains(CREATE)) {
throw new FileNotFoundException("Non existing file: " + path.toString()
+ ". Create option is not specified in " + flag);
}
}
/**
* Validate the CreateFlag for the append operation. The flag must contain
* APPEND, and cannot contain OVERWRITE.
*/
public static void validateForAppend(EnumSet<CreateFlag> flag) {
validate(flag);
if (!flag.contains(APPEND)) {
throw new HadoopIllegalArgumentException(flag
+ " does not contain APPEND");
}
}
}
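// ---------------------------------------------------------------------------
// Editor's sketch (hedged addition, not part of the original file): exercises
// the valid and invalid flag combinations described in the class javadoc
// above. The class name is hypothetical.
class CreateFlagSketch {
  public static void main(String[] args) {
    // CREATE|APPEND: create if missing, else append -- a valid combination.
    CreateFlag.validate(EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND));
    // APPEND|OVERWRITE is contradictory and is rejected.
    try {
      CreateFlag.validate(
          EnumSet.of(CreateFlag.APPEND, CreateFlag.OVERWRITE));
    } catch (HadoopIllegalArgumentException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}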
| 5,982 | 33.385057 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnresolvedLinkException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Thrown when a symbolic link is encountered in a path.
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Stable
public class UnresolvedLinkException extends IOException {
private static final long serialVersionUID = 1L;
public UnresolvedLinkException() {
super();
}
public UnresolvedLinkException(String msg) {
super(msg);
}
}
| 1,360 | 31.404762 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.ref.PhantomReference;
import java.lang.ref.ReferenceQueue;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.ServiceLoader;
import java.util.Set;
import java.util.Stack;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import com.google.common.annotations.VisibleForTesting;
/****************************************************************
* An abstract base class for a fairly generic filesystem. It
* may be implemented as a distributed filesystem, or as a "local"
* one that reflects the locally-connected disk. The local version
* exists for small Hadoop instances and for testing.
*
* <p>
*
* All user code that may potentially use the Hadoop Distributed
* File System should be written to use a FileSystem object. The
* Hadoop DFS is a multi-machine system that appears as a single
* disk. It's useful because of its fault tolerance and potentially
* very large capacity.
*
* <p>
* The local implementation is {@link LocalFileSystem} and distributed
* implementation is DistributedFileSystem.
*****************************************************************/
@InterfaceAudience.Public
@InterfaceStability.Stable
public abstract class FileSystem extends Configured implements Closeable {
public static final String FS_DEFAULT_NAME_KEY =
CommonConfigurationKeys.FS_DEFAULT_NAME_KEY;
public static final String DEFAULT_FS =
CommonConfigurationKeys.FS_DEFAULT_NAME_DEFAULT;
public static final Log LOG = LogFactory.getLog(FileSystem.class);
/**
* Priority of the FileSystem shutdown hook.
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 10;
/** FileSystem cache */
static final Cache CACHE = new Cache();
/** The key this instance is stored under in the cache. */
private Cache.Key key;
/** Recording statistics per a FileSystem class */
private static final Map<Class<? extends FileSystem>, Statistics>
statisticsTable =
new IdentityHashMap<Class<? extends FileSystem>, Statistics>();
/**
* The statistics for this file system.
*/
protected Statistics statistics;
/**
   * A cache of files that should be deleted when the filesystem is closed
* or the JVM is exited.
*/
private Set<Path> deleteOnExit = new TreeSet<Path>();
boolean resolveSymlinks;
/**
   * This method adds a file system to the cache so that tests can find it
   * later. It is only for testing.
* @param uri the uri to store it under
* @param conf the configuration to store it under
* @param fs the file system to store
* @throws IOException
*/
static void addFileSystemForTesting(URI uri, Configuration conf,
FileSystem fs) throws IOException {
CACHE.map.put(new Cache.Key(uri, conf), fs);
}
/**
* Get a filesystem instance based on the uri, the passed
* configuration and the user
* @param uri of the filesystem
* @param conf the configuration to use
* @param user to perform the get as
* @return the filesystem instance
* @throws IOException
* @throws InterruptedException
*/
public static FileSystem get(final URI uri, final Configuration conf,
final String user) throws IOException, InterruptedException {
String ticketCachePath =
conf.get(CommonConfigurationKeys.KERBEROS_TICKET_CACHE_PATH);
UserGroupInformation ugi =
UserGroupInformation.getBestUGI(ticketCachePath, user);
return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws IOException {
return get(uri, conf);
}
});
}
/**
* Returns the configured filesystem implementation.
* @param conf the configuration to use
*/
public static FileSystem get(Configuration conf) throws IOException {
return get(getDefaultUri(conf), conf);
}
/** Get the default filesystem URI from a configuration.
* @param conf the configuration to use
* @return the uri of the default filesystem
*/
public static URI getDefaultUri(Configuration conf) {
return URI.create(fixName(conf.get(FS_DEFAULT_NAME_KEY, DEFAULT_FS)));
}
/** Set the default filesystem URI in a configuration.
* @param conf the configuration to alter
* @param uri the new default filesystem uri
*/
public static void setDefaultUri(Configuration conf, URI uri) {
conf.set(FS_DEFAULT_NAME_KEY, uri.toString());
}
/** Set the default filesystem URI in a configuration.
* @param conf the configuration to alter
* @param uri the new default filesystem uri
*/
public static void setDefaultUri(Configuration conf, String uri) {
setDefaultUri(conf, URI.create(fixName(uri)));
}
/** Called after a new FileSystem instance is constructed.
* @param name a uri whose authority section names the host, port, etc.
* for this FileSystem
* @param conf the configuration
*/
public void initialize(URI name, Configuration conf) throws IOException {
statistics = getStatistics(name.getScheme(), getClass());
resolveSymlinks = conf.getBoolean(
CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY,
CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_DEFAULT);
}
/**
* Return the protocol scheme for the FileSystem.
* <p/>
* This implementation throws an <code>UnsupportedOperationException</code>.
*
* @return the protocol scheme for the FileSystem.
*/
public String getScheme() {
throw new UnsupportedOperationException("Not implemented by the " + getClass().getSimpleName() + " FileSystem implementation");
}
/** Returns a URI whose scheme and authority identify this FileSystem.*/
public abstract URI getUri();
/**
* Return a canonicalized form of this FileSystem's URI.
*
* The default implementation simply calls {@link #canonicalizeUri(URI)}
* on the filesystem's own URI, so subclasses typically only need to
* implement that method.
*
* @see #canonicalizeUri(URI)
*/
protected URI getCanonicalUri() {
return canonicalizeUri(getUri());
}
/**
* Canonicalize the given URI.
*
* This is filesystem-dependent, but may for example consist of
* canonicalizing the hostname using DNS and adding the default
* port if not specified.
*
* The default implementation simply fills in the default port if
* not specified and if the filesystem has a default port.
*
* @return URI
* @see NetUtils#getCanonicalUri(URI, int)
*/
protected URI canonicalizeUri(URI uri) {
if (uri.getPort() == -1 && getDefaultPort() > 0) {
// reconstruct the uri with the default port set
try {
uri = new URI(uri.getScheme(), uri.getUserInfo(),
uri.getHost(), getDefaultPort(),
uri.getPath(), uri.getQuery(), uri.getFragment());
} catch (URISyntaxException e) {
// Should never happen!
throw new AssertionError("Valid URI became unparseable: " +
uri);
}
}
return uri;
}
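  /**
   * Editor's sketch (hedged addition, not part of the original file):
   * illustrates the default-port fill-in performed by
   * {@link #canonicalizeUri(URI)}. The URI and the assumed default port of
   * 8020 are hypothetical.
   */
  private URI canonicalizeSketch() {
    // For a filesystem whose getDefaultPort() returns 8020:
    //   "hdfs://host/path" -> "hdfs://host:8020/path"
    return canonicalizeUri(URI.create("hdfs://host/path"));
  }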
/**
* Get the default port for this file system.
* @return the default port or 0 if there isn't one
*/
protected int getDefaultPort() {
return 0;
}
protected static FileSystem getFSofPath(final Path absOrFqPath,
final Configuration conf)
throws UnsupportedFileSystemException, IOException {
absOrFqPath.checkNotSchemeWithRelative();
absOrFqPath.checkNotRelative();
// Uses the default file system if not fully qualified
return get(absOrFqPath.toUri(), conf);
}
/**
* Get a canonical service name for this file system. The token cache is
* the only user of the canonical service name, and uses it to lookup this
* filesystem's service tokens.
* If file system provides a token of its own then it must have a canonical
* name, otherwise canonical name can be null.
*
* Default Impl: If the file system has child file systems
* (such as an embedded file system) then it is assumed that the fs has no
* tokens of its own and hence returns a null name; otherwise a service
* name is built using Uri and port.
*
* @return a service string that uniquely identifies this file system, null
* if the filesystem does not implement tokens
* @see SecurityUtil#buildDTServiceName(URI, int)
*/
@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
public String getCanonicalServiceName() {
return (getChildFileSystems() == null)
? SecurityUtil.buildDTServiceName(getUri(), getDefaultPort())
: null;
}
/** @deprecated call #getUri() instead.*/
@Deprecated
public String getName() { return getUri().toString(); }
/** @deprecated call #get(URI,Configuration) instead. */
@Deprecated
public static FileSystem getNamed(String name, Configuration conf)
throws IOException {
return get(URI.create(fixName(name)), conf);
}
/** Update old-format filesystem names, for back-compatibility. This should
* eventually be replaced with a checkName() method that throws an exception
* for old-format names. */
private static String fixName(String name) {
// convert old-format name to new-format name
if (name.equals("local")) { // "local" is now "file:///".
LOG.warn("\"local\" is a deprecated filesystem name."
+" Use \"file:///\" instead.");
name = "file:///";
} else if (name.indexOf('/')==-1) { // unqualified is "hdfs://"
LOG.warn("\""+name+"\" is a deprecated filesystem name."
+" Use \"hdfs://"+name+"/\" instead.");
name = "hdfs://"+name;
}
return name;
}
/**
* Get the local file system.
* @param conf the configuration to configure the file system with
* @return a LocalFileSystem
*/
public static LocalFileSystem getLocal(Configuration conf)
throws IOException {
return (LocalFileSystem)get(LocalFileSystem.NAME, conf);
}
/** Returns the FileSystem for this URI's scheme and authority. The scheme
* of the URI determines a configuration property name,
* <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
* The entire URI is passed to the FileSystem instance's initialize method.
*/
public static FileSystem get(URI uri, Configuration conf) throws IOException {
String scheme = uri.getScheme();
String authority = uri.getAuthority();
if (scheme == null && authority == null) { // use default FS
return get(conf);
}
if (scheme != null && authority == null) { // no authority
URI defaultUri = getDefaultUri(conf);
if (scheme.equals(defaultUri.getScheme()) // if scheme matches default
&& defaultUri.getAuthority() != null) { // & default has authority
return get(defaultUri, conf); // return default
}
}
String disableCacheName = String.format("fs.%s.impl.disable.cache", scheme);
if (conf.getBoolean(disableCacheName, false)) {
return createFileSystem(uri, conf);
}
return CACHE.get(uri, conf);
}
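  /**
   * Editor's sketch (hedged addition, not part of the original file): shows
   * the cache-bypass property consulted by {@link #get(URI, Configuration)}
   * above. The "hdfs" scheme and the namenode URI are hypothetical values.
   */
  private static FileSystem getUncachedSketch(Configuration conf)
      throws IOException {
    conf.setBoolean("fs.hdfs.impl.disable.cache", true); // skip the CACHE
    return get(URI.create("hdfs://namenode:8020/"), conf);
  }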
/**
* Returns the FileSystem for this URI's scheme and authority and the
* passed user. Internally invokes {@link #newInstance(URI, Configuration)}
* @param uri of the filesystem
* @param conf the configuration to use
* @param user to perform the get as
* @return filesystem instance
* @throws IOException
* @throws InterruptedException
*/
public static FileSystem newInstance(final URI uri, final Configuration conf,
final String user) throws IOException, InterruptedException {
String ticketCachePath =
conf.get(CommonConfigurationKeys.KERBEROS_TICKET_CACHE_PATH);
UserGroupInformation ugi =
UserGroupInformation.getBestUGI(ticketCachePath, user);
return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws IOException {
return newInstance(uri,conf);
}
});
}
/** Returns the FileSystem for this URI's scheme and authority. The scheme
* of the URI determines a configuration property name,
* <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
* The entire URI is passed to the FileSystem instance's initialize method.
* This always returns a new FileSystem object.
*/
public static FileSystem newInstance(URI uri, Configuration conf) throws IOException {
String scheme = uri.getScheme();
String authority = uri.getAuthority();
if (scheme == null) { // no scheme: use default FS
return newInstance(conf);
}
if (authority == null) { // no authority
URI defaultUri = getDefaultUri(conf);
if (scheme.equals(defaultUri.getScheme()) // if scheme matches default
&& defaultUri.getAuthority() != null) { // & default has authority
return newInstance(defaultUri, conf); // return default
}
}
return CACHE.getUnique(uri, conf);
}
/** Returns a unique configured filesystem implementation.
* This always returns a new FileSystem object.
* @param conf the configuration to use
*/
public static FileSystem newInstance(Configuration conf) throws IOException {
return newInstance(getDefaultUri(conf), conf);
}
/**
* Get a unique local file system object
* @param conf the configuration to configure the file system with
* @return a LocalFileSystem
* This always returns a new FileSystem object.
*/
public static LocalFileSystem newInstanceLocal(Configuration conf)
throws IOException {
return (LocalFileSystem)newInstance(LocalFileSystem.NAME, conf);
}
/**
* Close all cached filesystems. Be sure those filesystems are not
* used anymore.
*
* @throws IOException
*/
public static void closeAll() throws IOException {
CACHE.closeAll();
}
/**
* Close all cached filesystems for a given UGI. Be sure those filesystems
* are not used anymore.
* @param ugi user group info to close
* @throws IOException
*/
public static void closeAllForUGI(UserGroupInformation ugi)
throws IOException {
CACHE.closeAll(ugi);
}
/**
* Make sure that a path specifies a FileSystem.
* @param path to use
*/
public Path makeQualified(Path path) {
checkPath(path);
return path.makeQualified(this.getUri(), this.getWorkingDirectory());
}
/**
* Get a new delegation token for this file system.
* This is an internal method that should have been declared protected
* but wasn't historically.
* Callers should use {@link #addDelegationTokens(String, Credentials)}
*
* @param renewer the account name that is allowed to renew the token.
* @return a new delegation token
* @throws IOException
*/
@InterfaceAudience.Private()
public Token<?> getDelegationToken(String renewer) throws IOException {
return null;
}
/**
* Obtain all delegation tokens used by this FileSystem that are not
* already present in the given Credentials. Existing tokens will neither
* be verified as valid nor having the given renewer. Missing tokens will
* be acquired and added to the given Credentials.
*
* Default Impl: works for simple fs with its own token
* and also for an embedded fs whose tokens are those of its
   * children file systems (i.e. the embedded fs has no tokens of its
* own).
*
* @param renewer the user allowed to renew the delegation tokens
* @param credentials cache in which to add new delegation tokens
* @return list of new delegation tokens
* @throws IOException
*/
@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
public Token<?>[] addDelegationTokens(
final String renewer, Credentials credentials) throws IOException {
if (credentials == null) {
credentials = new Credentials();
}
final List<Token<?>> tokens = new ArrayList<Token<?>>();
collectDelegationTokens(renewer, credentials, tokens);
return tokens.toArray(new Token<?>[tokens.size()]);
}
/**
* Recursively obtain the tokens for this FileSystem and all descended
* FileSystems as determined by getChildFileSystems().
* @param renewer the user allowed to renew the delegation tokens
* @param credentials cache in which to add the new delegation tokens
* @param tokens list in which to add acquired tokens
* @throws IOException
*/
private void collectDelegationTokens(final String renewer,
final Credentials credentials,
final List<Token<?>> tokens)
throws IOException {
final String serviceName = getCanonicalServiceName();
    // Collect the token of this filesystem and then those of its embedded children
if (serviceName != null) { // fs has token, grab it
final Text service = new Text(serviceName);
Token<?> token = credentials.getToken(service);
if (token == null) {
token = getDelegationToken(renewer);
if (token != null) {
tokens.add(token);
credentials.addToken(service, token);
}
}
}
// Now collect the tokens from the children
final FileSystem[] children = getChildFileSystems();
if (children != null) {
for (final FileSystem fs : children) {
fs.collectDelegationTokens(renewer, credentials, tokens);
}
}
}
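  /**
   * Editor's sketch (hedged addition, not part of the original file): a
   * typical job-client use of the token collection above. The renewer
   * principal is a hypothetical value.
   */
  private void addTokensSketch() throws IOException {
    Credentials creds = new Credentials();
    Token<?>[] fresh = addDelegationTokens("yarn/rm@EXAMPLE.COM", creds);
    LOG.info("acquired " + fresh.length + " new token(s); "
        + creds.numberOfTokens() + " now held in the credentials cache");
  }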
/**
* Get all the immediate child FileSystems embedded in this FileSystem.
* It does not recurse and get grand children. If a FileSystem
* has multiple child FileSystems, then it should return a unique list
* of those FileSystems. Default is to return null to signify no children.
*
* @return FileSystems used by this FileSystem
*/
@InterfaceAudience.LimitedPrivate({ "HDFS" })
@VisibleForTesting
public FileSystem[] getChildFileSystems() {
return null;
}
  /** Create a file with the provided permission.
   * The permission of the file is set to be the provided permission as in
   * setPermission, not permission&~umask.
*
* It is implemented using two RPCs. It is understood that it is inefficient,
* but the implementation is thread-safe. The other option is to change the
* value of umask in configuration to be 0, but it is not thread-safe.
*
* @param fs file system handle
* @param file the name of the file to be created
* @param permission the permission of the file
* @return an output stream
* @throws IOException
*/
public static FSDataOutputStream create(FileSystem fs,
Path file, FsPermission permission) throws IOException {
// create the file with default permission
FSDataOutputStream out = fs.create(file);
// set its permission to the supplied one
fs.setPermission(file, permission);
return out;
}
  /** Create a directory with the provided permission.
   * The permission of the directory is set to be the provided permission as
   * in setPermission, not permission&~umask.
*
* @see #create(FileSystem, Path, FsPermission)
*
* @param fs file system handle
* @param dir the name of the directory to be created
* @param permission the permission of the directory
* @return true if the directory creation succeeds; false otherwise
* @throws IOException
*/
public static boolean mkdirs(FileSystem fs, Path dir, FsPermission permission)
throws IOException {
// create the directory using the default permission
boolean result = fs.mkdirs(dir);
// set its permission to be the supplied one
fs.setPermission(dir, permission);
return result;
}
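  /**
   * Editor's sketch (hedged addition, not part of the original file): using
   * the two-RPC helpers above so the resulting permission is exact rather
   * than permission&~umask. The paths and the 0640 mode are hypothetical.
   */
  private static void exactPermsSketch(FileSystem fs) throws IOException {
    FsPermission perm = new FsPermission((short) 0640);
    mkdirs(fs, new Path("/tmp/exact"), perm);
    try (FSDataOutputStream out =
        create(fs, new Path("/tmp/exact/file"), perm)) {
      out.writeUTF("payload");
    }
  }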
///////////////////////////////////////////////////////////////
// FileSystem
///////////////////////////////////////////////////////////////
protected FileSystem() {
super(null);
}
/**
* Check that a Path belongs to this FileSystem.
* @param path to check
*/
protected void checkPath(Path path) {
URI uri = path.toUri();
String thatScheme = uri.getScheme();
if (thatScheme == null) // fs is relative
return;
URI thisUri = getCanonicalUri();
String thisScheme = thisUri.getScheme();
//authority and scheme are not case sensitive
if (thisScheme.equalsIgnoreCase(thatScheme)) {// schemes match
String thisAuthority = thisUri.getAuthority();
String thatAuthority = uri.getAuthority();
if (thatAuthority == null && // path's authority is null
thisAuthority != null) { // fs has an authority
URI defaultUri = getDefaultUri(getConf());
if (thisScheme.equalsIgnoreCase(defaultUri.getScheme())) {
uri = defaultUri; // schemes match, so use this uri instead
} else {
uri = null; // can't determine auth of the path
}
}
if (uri != null) {
// canonicalize uri before comparing with this fs
uri = canonicalizeUri(uri);
thatAuthority = uri.getAuthority();
if (thisAuthority == thatAuthority || // authorities match
(thisAuthority != null &&
thisAuthority.equalsIgnoreCase(thatAuthority)))
return;
}
}
throw new IllegalArgumentException("Wrong FS: "+path+
", expected: "+this.getUri());
}
/**
* Return an array containing hostnames, offset and size of
* portions of the given file. For a nonexistent
* file or regions, null will be returned.
*
* This call is most helpful with DFS, where it returns
* hostnames of machines that contain the given file.
*
   * The default FileSystem implementation will simply return an element containing 'localhost'.
*
   * @param file the FileStatus to get data from
* @param start offset into the given file
* @param len length for which to get locations for
*/
public BlockLocation[] getFileBlockLocations(FileStatus file,
long start, long len) throws IOException {
if (file == null) {
return null;
}
if (start < 0 || len < 0) {
throw new IllegalArgumentException("Invalid start or len parameter");
}
if (file.getLen() <= start) {
return new BlockLocation[0];
}
String[] name = { "localhost:50010" };
String[] host = { "localhost" };
return new BlockLocation[] {
new BlockLocation(name, host, 0, file.getLen()) };
}
/**
* Return an array containing hostnames, offset and size of
* portions of the given file. For a nonexistent
* file or regions, null will be returned.
*
* This call is most helpful with DFS, where it returns
* hostnames of machines that contain the given file.
*
   * The default FileSystem implementation will simply return an element containing 'localhost'.
*
   * @param p path used to identify the FS, since an FS could be
   * delegating the call to another FS
* @param start offset into the given file
* @param len length for which to get locations for
*/
public BlockLocation[] getFileBlockLocations(Path p,
long start, long len) throws IOException {
if (p == null) {
throw new NullPointerException();
}
FileStatus file = getFileStatus(p);
return getFileBlockLocations(file, start, len);
}
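  /**
   * Editor's sketch (hedged addition, not part of the original file):
   * consuming the block-location answer above. With the default
   * implementation this prints a single 'localhost' element spanning the
   * whole file.
   */
  private void printLocationsSketch(Path f) throws IOException {
    FileStatus stat = getFileStatus(f);
    for (BlockLocation loc : getFileBlockLocations(stat, 0, stat.getLen())) {
      LOG.info(loc.getOffset() + "+" + loc.getLength() + " on "
          + Arrays.toString(loc.getHosts()));
    }
  }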
/**
* Return a set of server default configuration values
* @return server default configuration values
* @throws IOException
* @deprecated use {@link #getServerDefaults(Path)} instead
*/
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
Configuration conf = getConf();
// CRC32 is chosen as default as it is available in all
// releases that support checksum.
// The client trash configuration is ignored.
return new FsServerDefaults(getDefaultBlockSize(),
conf.getInt("io.bytes.per.checksum", 512),
64 * 1024,
getDefaultReplication(),
conf.getInt("io.file.buffer.size", 4096),
false,
CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT,
DataChecksum.Type.CRC32);
}
/**
* Return a set of server default configuration values
   * @param p path used to identify the FS, since an FS could be
   * delegating the call to another FS
* @return server default configuration values
* @throws IOException
*/
public FsServerDefaults getServerDefaults(Path p) throws IOException {
return getServerDefaults();
}
/**
   * Return the fully-qualified path of path f, resolving the path
   * through any symlinks or mount points
* @param p path to be resolved
* @return fully qualified path
* @throws FileNotFoundException
*/
public Path resolvePath(final Path p) throws IOException {
checkPath(p);
return getFileStatus(p).getPath();
}
/**
* Opens an FSDataInputStream at the indicated Path.
* @param f the file name to open
* @param bufferSize the size of the buffer to be used.
*/
public abstract FSDataInputStream open(Path f, int bufferSize)
throws IOException;
/**
* Opens an FSDataInputStream at the indicated Path.
* @param f the file to open
*/
public FSDataInputStream open(Path f) throws IOException {
return open(f, getConf().getInt("io.file.buffer.size", 4096));
}
/**
* Create an FSDataOutputStream at the indicated Path.
* Files are overwritten by default.
* @param f the file to create
*/
public FSDataOutputStream create(Path f) throws IOException {
return create(f, true);
}
/**
* Create an FSDataOutputStream at the indicated Path.
* @param f the file to create
* @param overwrite if a file with this name already exists, then if true,
* the file will be overwritten, and if false an exception will be thrown.
*/
public FSDataOutputStream create(Path f, boolean overwrite)
throws IOException {
return create(f, overwrite,
getConf().getInt("io.file.buffer.size", 4096),
getDefaultReplication(f),
getDefaultBlockSize(f));
}
/**
* Create an FSDataOutputStream at the indicated Path with write-progress
* reporting.
* Files are overwritten by default.
* @param f the file to create
* @param progress to report progress
*/
public FSDataOutputStream create(Path f, Progressable progress)
throws IOException {
return create(f, true,
getConf().getInt("io.file.buffer.size", 4096),
getDefaultReplication(f),
getDefaultBlockSize(f), progress);
}
/**
* Create an FSDataOutputStream at the indicated Path.
* Files are overwritten by default.
* @param f the file to create
* @param replication the replication factor
*/
public FSDataOutputStream create(Path f, short replication)
throws IOException {
return create(f, true,
getConf().getInt("io.file.buffer.size", 4096),
replication,
getDefaultBlockSize(f));
}
/**
* Create an FSDataOutputStream at the indicated Path with write-progress
* reporting.
* Files are overwritten by default.
* @param f the file to create
* @param replication the replication factor
* @param progress to report progress
*/
public FSDataOutputStream create(Path f, short replication,
Progressable progress) throws IOException {
return create(f, true,
getConf().getInt(
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT),
replication,
getDefaultBlockSize(f), progress);
}
/**
* Create an FSDataOutputStream at the indicated Path.
* @param f the file name to create
* @param overwrite if a file with this name already exists, then if true,
* the file will be overwritten, and if false an error will be thrown.
* @param bufferSize the size of the buffer to be used.
*/
public FSDataOutputStream create(Path f,
boolean overwrite,
int bufferSize
) throws IOException {
return create(f, overwrite, bufferSize,
getDefaultReplication(f),
getDefaultBlockSize(f));
}
/**
* Create an FSDataOutputStream at the indicated Path with write-progress
* reporting.
* @param f the path of the file to open
* @param overwrite if a file with this name already exists, then if true,
* the file will be overwritten, and if false an error will be thrown.
* @param bufferSize the size of the buffer to be used.
*/
public FSDataOutputStream create(Path f,
boolean overwrite,
int bufferSize,
Progressable progress
) throws IOException {
return create(f, overwrite, bufferSize,
getDefaultReplication(f),
getDefaultBlockSize(f), progress);
}
/**
* Create an FSDataOutputStream at the indicated Path.
* @param f the file name to open
* @param overwrite if a file with this name already exists, then if true,
* the file will be overwritten, and if false an error will be thrown.
* @param bufferSize the size of the buffer to be used.
* @param replication required block replication for the file.
*/
public FSDataOutputStream create(Path f,
boolean overwrite,
int bufferSize,
short replication,
long blockSize
) throws IOException {
return create(f, overwrite, bufferSize, replication, blockSize, null);
}
/**
* Create an FSDataOutputStream at the indicated Path with write-progress
* reporting.
* @param f the file name to open
* @param overwrite if a file with this name already exists, then if true,
* the file will be overwritten, and if false an error will be thrown.
* @param bufferSize the size of the buffer to be used.
* @param replication required block replication for the file.
*/
public FSDataOutputStream create(Path f,
boolean overwrite,
int bufferSize,
short replication,
long blockSize,
Progressable progress
) throws IOException {
return this.create(f, FsPermission.getFileDefault().applyUMask(
FsPermission.getUMask(getConf())), overwrite, bufferSize,
replication, blockSize, progress);
}
/**
* Create an FSDataOutputStream at the indicated Path with write-progress
* reporting.
* @param f the file name to open
* @param permission
* @param overwrite if a file with this name already exists, then if true,
* the file will be overwritten, and if false an error will be thrown.
* @param bufferSize the size of the buffer to be used.
* @param replication required block replication for the file.
* @param blockSize
* @param progress
* @throws IOException
* @see #setPermission(Path, FsPermission)
*/
public abstract FSDataOutputStream create(Path f,
FsPermission permission,
boolean overwrite,
int bufferSize,
short replication,
long blockSize,
Progressable progress) throws IOException;
/**
* Create an FSDataOutputStream at the indicated Path with write-progress
* reporting.
* @param f the file name to open
* @param permission
* @param flags {@link CreateFlag}s to use for this stream.
* @param bufferSize the size of the buffer to be used.
* @param replication required block replication for the file.
* @param blockSize
* @param progress
* @throws IOException
* @see #setPermission(Path, FsPermission)
*/
public FSDataOutputStream create(Path f,
FsPermission permission,
EnumSet<CreateFlag> flags,
int bufferSize,
short replication,
long blockSize,
Progressable progress) throws IOException {
return create(f, permission, flags, bufferSize, replication,
blockSize, progress, null);
}
/**
* Create an FSDataOutputStream at the indicated Path with a custom
* checksum option
* @param f the file name to open
* @param permission
* @param flags {@link CreateFlag}s to use for this stream.
* @param bufferSize the size of the buffer to be used.
* @param replication required block replication for the file.
* @param blockSize
* @param progress
* @param checksumOpt checksum parameter. If null, the values
* found in conf will be used.
* @throws IOException
* @see #setPermission(Path, FsPermission)
*/
public FSDataOutputStream create(Path f,
FsPermission permission,
EnumSet<CreateFlag> flags,
int bufferSize,
short replication,
long blockSize,
Progressable progress,
ChecksumOpt checksumOpt) throws IOException {
// Checksum options are ignored by default. The file systems that
// implement checksum need to override this method. The full
// support is currently only available in DFS.
return create(f, permission, flags.contains(CreateFlag.OVERWRITE),
bufferSize, replication, blockSize, progress);
}
  /*
   * This create has been added to support the FileContext that processes
   * the permission with umask before calling this method.
   * This is a temporary method added to support the transition from
   * FileSystem to FileContext for user applications.
*/
@Deprecated
protected FSDataOutputStream primitiveCreate(Path f,
FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
short replication, long blockSize, Progressable progress,
ChecksumOpt checksumOpt) throws IOException {
boolean pathExists = exists(f);
CreateFlag.validate(f, pathExists, flag);
// Default impl assumes that permissions do not matter and
// nor does the bytesPerChecksum hence
// calling the regular create is good enough.
// FSs that implement permissions should override this.
if (pathExists && flag.contains(CreateFlag.APPEND)) {
return append(f, bufferSize, progress);
}
return this.create(f, absolutePermission,
flag.contains(CreateFlag.OVERWRITE), bufferSize, replication,
blockSize, progress);
}
/**
* This version of the mkdirs method assumes that the permission is absolute.
* It has been added to support the FileContext that processes the permission
* with umask before calling this method.
   * This is a temporary method added to support the transition from FileSystem
* to FileContext for user applications.
*/
@Deprecated
protected boolean primitiveMkdir(Path f, FsPermission absolutePermission)
throws IOException {
// Default impl is to assume that permissions do not matter and hence
// calling the regular mkdirs is good enough.
// FSs that implement permissions should override this.
return this.mkdirs(f, absolutePermission);
}
/**
* This version of the mkdirs method assumes that the permission is absolute.
* It has been added to support the FileContext that processes the permission
* with umask before calling this method.
   * This is a temporary method added to support the transition from FileSystem
* to FileContext for user applications.
*/
@Deprecated
protected void primitiveMkdir(Path f, FsPermission absolutePermission,
boolean createParent)
throws IOException {
if (!createParent) { // parent must exist.
      // since this.mkdirs creates parent dirs automatically,
// we must throw exception if parent does not exist.
final FileStatus stat = getFileStatus(f.getParent());
if (stat == null) {
throw new FileNotFoundException("Missing parent:" + f);
}
if (!stat.isDirectory()) {
throw new ParentNotDirectoryException("parent is not a dir");
}
// parent does exist - go ahead with mkdir of leaf
}
// Default impl is to assume that permissions do not matter and hence
// calling the regular mkdirs is good enough.
// FSs that implement permissions should override this.
if (!this.mkdirs(f, absolutePermission)) {
throw new IOException("mkdir of "+ f + " failed");
}
}
/**
* Opens an FSDataOutputStream at the indicated Path with write-progress
* reporting. Same as create(), except fails if parent directory doesn't
* already exist.
* @param f the file name to open
* @param overwrite if a file with this name already exists, then if true,
* the file will be overwritten, and if false an error will be thrown.
* @param bufferSize the size of the buffer to be used.
* @param replication required block replication for the file.
* @param blockSize
* @param progress
* @throws IOException
* @see #setPermission(Path, FsPermission)
* @deprecated API only for 0.20-append
*/
@Deprecated
public FSDataOutputStream createNonRecursive(Path f,
boolean overwrite,
int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return this.createNonRecursive(f, FsPermission.getFileDefault(),
overwrite, bufferSize, replication, blockSize, progress);
}
/**
* Opens an FSDataOutputStream at the indicated Path with write-progress
* reporting. Same as create(), except fails if parent directory doesn't
* already exist.
* @param f the file name to open
* @param permission
* @param overwrite if a file with this name already exists, then if true,
* the file will be overwritten, and if false an error will be thrown.
* @param bufferSize the size of the buffer to be used.
* @param replication required block replication for the file.
* @param blockSize
* @param progress
* @throws IOException
* @see #setPermission(Path, FsPermission)
* @deprecated API only for 0.20-append
*/
@Deprecated
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return createNonRecursive(f, permission,
overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
: EnumSet.of(CreateFlag.CREATE), bufferSize,
replication, blockSize, progress);
}
/**
* Opens an FSDataOutputStream at the indicated Path with write-progress
* reporting. Same as create(), except fails if parent directory doesn't
* already exist.
* @param f the file name to open
* @param permission
* @param flags {@link CreateFlag}s to use for this stream.
* @param bufferSize the size of the buffer to be used.
* @param replication required block replication for the file.
* @param blockSize
* @param progress
* @throws IOException
* @see #setPermission(Path, FsPermission)
* @deprecated API only for 0.20-append
*/
@Deprecated
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
throw new IOException("createNonRecursive unsupported for this filesystem "
+ this.getClass());
}
/**
   * Creates the given Path as a brand-new zero-length file. Returns false
   * if creation fails or if the file already exists.
*
* @param f path to use for create
*/
public boolean createNewFile(Path f) throws IOException {
if (exists(f)) {
return false;
} else {
create(f, false, getConf().getInt("io.file.buffer.size", 4096)).close();
return true;
}
}
/**
* Append to an existing file (optional operation).
* Same as append(f, getConf().getInt("io.file.buffer.size", 4096), null)
* @param f the existing file to be appended.
* @throws IOException
*/
public FSDataOutputStream append(Path f) throws IOException {
return append(f, getConf().getInt("io.file.buffer.size", 4096), null);
}
/**
* Append to an existing file (optional operation).
* Same as append(f, bufferSize, null).
* @param f the existing file to be appended.
* @param bufferSize the size of the buffer to be used.
* @throws IOException
*/
public FSDataOutputStream append(Path f, int bufferSize) throws IOException {
return append(f, bufferSize, null);
}
/**
* Append to an existing file (optional operation).
* @param f the existing file to be appended.
* @param bufferSize the size of the buffer to be used.
* @param progress for reporting progress if it is not null.
* @throws IOException
*/
public abstract FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException;
/**
* Concat existing files together.
* @param trg the path to the target destination.
* @param psrcs the paths to the sources to use for the concatenation.
* @throws IOException
*/
public void concat(final Path trg, final Path [] psrcs) throws IOException {
throw new UnsupportedOperationException("Not implemented by the " +
getClass().getSimpleName() + " FileSystem implementation");
}
/**
* Get replication.
*
* @deprecated Use getFileStatus() instead
* @param src file name
* @return file replication
* @throws IOException
*/
@Deprecated
public short getReplication(Path src) throws IOException {
return getFileStatus(src).getReplication();
}
/**
* Set replication for an existing file.
*
* @param src file name
* @param replication new replication
* @throws IOException
* @return true if successful;
* false if file does not exist or is a directory
*/
public boolean setReplication(Path src, short replication)
throws IOException {
return true;
}
/**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
* @param src path to be renamed
* @param dst new path after rename
* @throws IOException on failure
* @return true if rename is successful
*/
public abstract boolean rename(Path src, Path dst) throws IOException;
/**
* Renames Path src to Path dst
* <ul>
* <li>Fails if src is a file and dst is a directory.
* <li>Fails if src is a directory and dst is a file.
* <li>Fails if the parent of dst does not exist or is a file.
* </ul>
* <p>
* If OVERWRITE option is not passed as an argument, rename fails
* if the dst already exists.
* <p>
* If OVERWRITE option is passed as an argument, rename overwrites
* the dst if it is a file or an empty directory. Rename fails if dst is
* a non-empty directory.
* <p>
* Note that atomicity of rename is dependent on the file system
* implementation. Please refer to the file system documentation for
   * details. This default implementation is non-atomic.
* <p>
* This method is deprecated since it is a temporary method added to
* support the transition from FileSystem to FileContext for user
* applications.
*
* @param src path to be renamed
* @param dst new path after rename
* @throws IOException on failure
*/
@Deprecated
protected void rename(final Path src, final Path dst,
final Rename... options) throws IOException {
// Default implementation
final FileStatus srcStatus = getFileLinkStatus(src);
if (srcStatus == null) {
throw new FileNotFoundException("rename source " + src + " not found.");
}
boolean overwrite = false;
if (null != options) {
for (Rename option : options) {
if (option == Rename.OVERWRITE) {
overwrite = true;
}
}
}
FileStatus dstStatus;
try {
dstStatus = getFileLinkStatus(dst);
} catch (IOException e) {
dstStatus = null;
}
if (dstStatus != null) {
if (srcStatus.isDirectory() != dstStatus.isDirectory()) {
throw new IOException("Source " + src + " Destination " + dst
+ " both should be either file or directory");
}
if (!overwrite) {
throw new FileAlreadyExistsException("rename destination " + dst
+ " already exists.");
}
// Delete the destination that is a file or an empty directory
if (dstStatus.isDirectory()) {
FileStatus[] list = listStatus(dst);
if (list != null && list.length != 0) {
throw new IOException(
"rename cannot overwrite non empty destination directory " + dst);
}
}
delete(dst, false);
} else {
final Path parent = dst.getParent();
final FileStatus parentStatus = getFileStatus(parent);
if (parentStatus == null) {
throw new FileNotFoundException("rename destination parent " + parent
+ " not found.");
}
if (!parentStatus.isDirectory()) {
throw new ParentNotDirectoryException("rename destination parent " + parent
+ " is a file.");
}
}
if (!rename(src, dst)) {
throw new IOException("rename from " + src + " to " + dst + " failed.");
}
}
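  /*
   * Usage sketch (illustrative; the paths are assumptions). Since this
   * overload is protected and deprecated, it is only reachable from a
   * FileSystem subclass or from this package:
   *
   *   rename(new Path("/tmp/part-0000"), new Path("/data/part-0000"),
   *       Rename.OVERWRITE);  // replaces dst if it is a file or empty dir
   */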
/**
* Truncate the file in the indicated path to the indicated size.
* <ul>
* <li>Fails if path is a directory.
* <li>Fails if path does not exist.
* <li>Fails if path is not closed.
* <li>Fails if new size is greater than current size.
* </ul>
* @param f The path to the file to be truncated
* @param newLength The size the file is to be truncated to
*
* @return <code>true</code> if the file has been truncated to the desired
* <code>newLength</code> and is immediately available to be reused for
* write operations such as <code>append</code>, or
* <code>false</code> if a background process of adjusting the length of
* the last block has been started, and clients should wait for it to
* complete before proceeding with further file updates.
*/
public boolean truncate(Path f, long newLength) throws IOException {
throw new UnsupportedOperationException("Not implemented by the " +
getClass().getSimpleName() + " FileSystem implementation");
}
/**
* Delete a file
* @deprecated Use {@link #delete(Path, boolean)} instead.
*/
@Deprecated
public boolean delete(Path f) throws IOException {
return delete(f, true);
}
/** Delete a file.
*
* @param f the path to delete.
   * @param recursive if path is a directory and set to
   * true, the directory is deleted, else an exception is thrown. In
   * the case of a file, recursive can be set to either true or false.
* @return true if delete is successful else false.
* @throws IOException
*/
public abstract boolean delete(Path f, boolean recursive) throws IOException;
/**
* Mark a path to be deleted when FileSystem is closed.
   * When the JVM shuts down, all FileSystem objects will be closed
   * automatically; the marked path will then be deleted as a result of
   * closing the FileSystem.
*
* The path has to exist in the file system.
*
* @param f the path to delete.
* @return true if deleteOnExit is successful, otherwise false.
* @throws IOException
*/
public boolean deleteOnExit(Path f) throws IOException {
if (!exists(f)) {
return false;
}
synchronized (deleteOnExit) {
deleteOnExit.add(f);
}
return true;
}
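  /*
   * Usage sketch (illustrative; the scratch path is an assumption and
   * fs denotes an instance of this class):
   *
   *   Path scratch = new Path("/tmp/app-scratch");
   *   fs.mkdirs(scratch);
   *   fs.deleteOnExit(scratch);  // removed recursively when fs.close()
   *                              // runs processDeleteOnExit() below
   */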
/**
   * Cancel the scheduled deletion of the path when the FileSystem is closed.
   * @param f the path whose deletion is to be cancelled
   * @return true if the path was previously marked for deletion on exit
*/
public boolean cancelDeleteOnExit(Path f) {
synchronized (deleteOnExit) {
return deleteOnExit.remove(f);
}
}
/**
* Delete all files that were marked as delete-on-exit. This recursively
* deletes all files in the specified paths.
*/
protected void processDeleteOnExit() {
synchronized (deleteOnExit) {
for (Iterator<Path> iter = deleteOnExit.iterator(); iter.hasNext();) {
Path path = iter.next();
try {
if (exists(path)) {
delete(path, true);
}
}
catch (IOException e) {
LOG.info("Ignoring failure to deleteOnExit for path " + path);
}
iter.remove();
}
}
}
  /** Check if a path exists.
* @param f source file
*/
public boolean exists(Path f) throws IOException {
try {
return getFileStatus(f) != null;
} catch (FileNotFoundException e) {
return false;
}
}
/** True iff the named path is a directory.
* Note: Avoid using this method. Instead reuse the FileStatus
* returned by getFileStatus() or listStatus() methods.
* @param f path to check
*/
public boolean isDirectory(Path f) throws IOException {
try {
return getFileStatus(f).isDirectory();
} catch (FileNotFoundException e) {
return false; // f does not exist
}
}
/** True iff the named path is a regular file.
* Note: Avoid using this method. Instead reuse the FileStatus
* returned by getFileStatus() or listStatus() methods.
* @param f path to check
*/
public boolean isFile(Path f) throws IOException {
try {
return getFileStatus(f).isFile();
} catch (FileNotFoundException e) {
return false; // f does not exist
}
}
  /** The number of bytes in a file.
   * @deprecated Use getFileStatus() instead */
@Deprecated
public long getLength(Path f) throws IOException {
return getFileStatus(f).getLen();
}
/** Return the {@link ContentSummary} of a given {@link Path}.
* @param f path to use
*/
public ContentSummary getContentSummary(Path f) throws IOException {
FileStatus status = getFileStatus(f);
if (status.isFile()) {
// f is a file
long length = status.getLen();
return new ContentSummary.Builder().length(length).
fileCount(1).directoryCount(0).spaceConsumed(length).build();
}
// f is a directory
long[] summary = {0, 0, 1};
for(FileStatus s : listStatus(f)) {
long length = s.getLen();
ContentSummary c = s.isDirectory() ? getContentSummary(s.getPath()) :
new ContentSummary.Builder().length(length).
fileCount(1).directoryCount(0).spaceConsumed(length).build();
summary[0] += c.getLength();
summary[1] += c.getFileCount();
summary[2] += c.getDirectoryCount();
}
return new ContentSummary.Builder().length(summary[0]).
fileCount(summary[1]).directoryCount(summary[2]).
spaceConsumed(summary[0]).build();
}
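  /*
   * Usage sketch (illustrative; the path is an assumption and fs denotes
   * an instance of this class):
   *
   *   ContentSummary cs = fs.getContentSummary(new Path("/data"));
   *   System.out.println(cs.getLength() + " bytes in "
   *       + cs.getFileCount() + " files and "
   *       + cs.getDirectoryCount() + " directories");
   */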
final private static PathFilter DEFAULT_FILTER = new PathFilter() {
@Override
public boolean accept(Path file) {
return true;
}
};
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param f given path
   * @return the statuses of the files/directories in the given path
* @throws FileNotFoundException when the path does not exist;
* IOException see specific implementation
*/
public abstract FileStatus[] listStatus(Path f) throws FileNotFoundException,
IOException;
/*
   * Filter files/directories in the given path using the user-supplied path
   * filter. Results are added to the given list <code>results</code>.
*/
private void listStatus(ArrayList<FileStatus> results, Path f,
PathFilter filter) throws FileNotFoundException, IOException {
    FileStatus[] listing = listStatus(f);
if (listing == null) {
throw new IOException("Error accessing " + f);
}
for (int i = 0; i < listing.length; i++) {
if (filter.accept(listing[i].getPath())) {
results.add(listing[i]);
}
}
}
/**
* @return an iterator over the corrupt files under the given path
* (may contain duplicates if a file has more than one corrupt block)
* @throws IOException
*/
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
throws IOException {
throw new UnsupportedOperationException(getClass().getCanonicalName() +
" does not support" +
" listCorruptFileBlocks");
}
/**
* Filter files/directories in the given path using the user-supplied path
* filter.
*
* @param f
* a path name
* @param filter
* the user-supplied path filter
* @return an array of FileStatus objects for the files under the given path
* after applying the filter
* @throws FileNotFoundException when the path does not exist;
* IOException see specific implementation
*/
public FileStatus[] listStatus(Path f, PathFilter filter)
throws FileNotFoundException, IOException {
ArrayList<FileStatus> results = new ArrayList<FileStatus>();
listStatus(results, f, filter);
return results.toArray(new FileStatus[results.size()]);
}
/**
* Filter files/directories in the given list of paths using default
* path filter.
*
* @param files
* a list of paths
* @return a list of statuses for the files under the given paths after
* applying the filter default Path filter
* @throws FileNotFoundException when the path does not exist;
* IOException see specific implementation
*/
public FileStatus[] listStatus(Path[] files)
throws FileNotFoundException, IOException {
return listStatus(files, DEFAULT_FILTER);
}
/**
* Filter files/directories in the given list of paths using user-supplied
* path filter.
*
* @param files
* a list of paths
* @param filter
* the user-supplied path filter
* @return a list of statuses for the files under the given paths after
* applying the filter
* @throws FileNotFoundException when the path does not exist;
* IOException see specific implementation
*/
public FileStatus[] listStatus(Path[] files, PathFilter filter)
throws FileNotFoundException, IOException {
ArrayList<FileStatus> results = new ArrayList<FileStatus>();
for (int i = 0; i < files.length; i++) {
listStatus(results, files[i], filter);
}
return results.toArray(new FileStatus[results.size()]);
}
/**
* <p>Return all the files that match filePattern and are not checksum
* files. Results are sorted by their names.
*
* <p>
* A filename pattern is composed of <i>regular</i> characters and
* <i>special pattern matching</i> characters, which are:
*
* <dl>
* <dd>
* <dl>
* <p>
* <dt> <tt> ? </tt>
* <dd> Matches any single character.
*
* <p>
* <dt> <tt> * </tt>
* <dd> Matches zero or more characters.
*
* <p>
* <dt> <tt> [<i>abc</i>] </tt>
* <dd> Matches a single character from character set
* <tt>{<i>a,b,c</i>}</tt>.
*
* <p>
* <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
* <dd> Matches a single character from the character range
* <tt>{<i>a...b</i>}</tt>. Note that character <tt><i>a</i></tt> must be
* lexicographically less than or equal to character <tt><i>b</i></tt>.
*
* <p>
* <dt> <tt> [^<i>a</i>] </tt>
* <dd> Matches a single character that is not from character set or range
* <tt>{<i>a</i>}</tt>. Note that the <tt>^</tt> character must occur
* immediately to the right of the opening bracket.
*
* <p>
* <dt> <tt> \<i>c</i> </tt>
* <dd> Removes (escapes) any special meaning of character <i>c</i>.
*
* <p>
* <dt> <tt> {ab,cd} </tt>
* <dd> Matches a string from the string set <tt>{<i>ab, cd</i>} </tt>
*
* <p>
* <dt> <tt> {ab,c{de,fh}} </tt>
* <dd> Matches a string from the string set <tt>{<i>ab, cde, cfh</i>}</tt>
*
* </dl>
* </dd>
* </dl>
*
   * @param pathPattern a regular expression specifying a path pattern
* @return an array of paths that match the path pattern
* @throws IOException
*/
public FileStatus[] globStatus(Path pathPattern) throws IOException {
return new Globber(this, pathPattern, DEFAULT_FILTER).glob();
}
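  /*
   * Usage sketch for the pattern language documented above (the directory
   * layout is an assumption and fs denotes an instance of this class):
   *
   *   // all part files across the 2015 daily partitions
   *   FileStatus[] parts =
   *       fs.globStatus(new Path("/logs/2015-*/part-[0-9]*"));
   */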
/**
* Return an array of FileStatus objects whose path names match pathPattern
* and is accepted by the user-supplied path filter. Results are sorted by
* their path names.
* Return null if pathPattern has no glob and the path does not exist.
* Return an empty array if pathPattern has a glob and no path matches it.
*
* @param pathPattern
* a regular expression specifying the path pattern
* @param filter
* a user-supplied path filter
* @return an array of FileStatus objects
* @throws IOException if any I/O error occurs when fetching file status
*/
public FileStatus[] globStatus(Path pathPattern, PathFilter filter)
throws IOException {
return new Globber(this, pathPattern, filter).glob();
}
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
   * Return the file's status and block locations if the path is a file.
*
* If a returned status is a file, it contains the file's block locations.
*
* @param f is the path
*
* @return an iterator that traverses statuses of the files/directories
* in the given path
*
* @throws FileNotFoundException If <code>f</code> does not exist
* @throws IOException If an I/O error occurred
*/
public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
throws FileNotFoundException, IOException {
return listLocatedStatus(f, DEFAULT_FILTER);
}
/**
   * List a directory.
   * A returned status includes its block locations if it is a file.
   * The results are filtered by the given path filter.
* @param f a path
* @param filter a path filter
* @return an iterator that traverses statuses of the files/directories
* in the given path
* @throws FileNotFoundException if <code>f</code> does not exist
* @throws IOException if any I/O error occurred
*/
protected RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f,
final PathFilter filter)
throws FileNotFoundException, IOException {
return new RemoteIterator<LocatedFileStatus>() {
private final FileStatus[] stats = listStatus(f, filter);
private int i = 0;
@Override
public boolean hasNext() {
return i<stats.length;
}
@Override
public LocatedFileStatus next() throws IOException {
if (!hasNext()) {
throw new NoSuchElementException("No more entry in " + f);
}
FileStatus result = stats[i++];
BlockLocation[] locs = result.isFile() ?
getFileBlockLocations(result.getPath(), 0, result.getLen()) :
null;
return new LocatedFileStatus(result, locs);
}
};
}
/**
   * Returns a remote iterator so that follow-up calls are made on demand
* while consuming the entries. Each file system implementation should
* override this method and provide a more efficient implementation, if
* possible.
*
* @param p target path
* @return remote iterator
*/
public RemoteIterator<FileStatus> listStatusIterator(final Path p)
throws FileNotFoundException, IOException {
return new RemoteIterator<FileStatus>() {
private final FileStatus[] stats = listStatus(p);
private int i = 0;
@Override
public boolean hasNext() {
return i<stats.length;
}
@Override
public FileStatus next() throws IOException {
if (!hasNext()) {
throw new NoSuchElementException("No more entry in " + p);
}
return stats[i++];
}
};
}
/**
* List the statuses and block locations of the files in the given path.
*
* If the path is a directory,
   * if recursive is false, returns the files in the directory;
   * if recursive is true, returns the files in the subtree rooted at the path.
* If the path is a file, return the file's status and block locations.
*
* @param f is the path
* @param recursive if the subdirectories need to be traversed recursively
*
* @return an iterator that traverses statuses of the files
*
* @throws FileNotFoundException when the path does not exist;
* IOException see specific implementation
*/
public RemoteIterator<LocatedFileStatus> listFiles(
final Path f, final boolean recursive)
throws FileNotFoundException, IOException {
return new RemoteIterator<LocatedFileStatus>() {
private Stack<RemoteIterator<LocatedFileStatus>> itors =
new Stack<RemoteIterator<LocatedFileStatus>>();
private RemoteIterator<LocatedFileStatus> curItor =
listLocatedStatus(f);
private LocatedFileStatus curFile;
@Override
public boolean hasNext() throws IOException {
while (curFile == null) {
if (curItor.hasNext()) {
handleFileStat(curItor.next());
} else if (!itors.empty()) {
curItor = itors.pop();
} else {
return false;
}
}
return true;
}
/**
* Process the input stat.
* If it is a file, return the file stat.
* If it is a directory, traverse the directory if recursive is true;
* ignore it if recursive is false.
* @param stat input status
* @throws IOException if any IO error occurs
*/
private void handleFileStat(LocatedFileStatus stat) throws IOException {
if (stat.isFile()) { // file
curFile = stat;
} else if (recursive) { // directory
itors.push(curItor);
curItor = listLocatedStatus(stat.getPath());
}
}
@Override
public LocatedFileStatus next() throws IOException {
if (hasNext()) {
LocatedFileStatus result = curFile;
curFile = null;
return result;
}
throw new java.util.NoSuchElementException("No more entry in " + f);
}
};
}
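  /*
   * Usage sketch (illustrative; the path is an assumption and fs denotes
   * an instance of this class):
   *
   *   RemoteIterator<LocatedFileStatus> it =
   *       fs.listFiles(new Path("/data"), true);  // recursive
   *   while (it.hasNext()) {
   *     LocatedFileStatus st = it.next();
   *     // st.getBlockLocations() is already populated for files
   *   }
   */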
/** Return the current user's home directory in this filesystem.
* The default implementation returns "/user/$USER/".
*/
public Path getHomeDirectory() {
return this.makeQualified(
new Path("/user/"+System.getProperty("user.name")));
}
/**
* Set the current working directory for the given file system. All relative
* paths will be resolved relative to it.
*
   * @param new_dir the new working directory
*/
public abstract void setWorkingDirectory(Path new_dir);
/**
* Get the current working directory for the given file system
* @return the directory pathname
*/
public abstract Path getWorkingDirectory();
/**
   * Note: with the new FileContext class, getWorkingDirectory()
   * will be removed.
   * The working directory is implemented in FileContext.
*
* Some file systems like LocalFileSystem have an initial workingDir
* that we use as the starting workingDir. For other file systems
   * like HDFS there is no built-in notion of an initial workingDir.
   *
   * @return if there is a built-in notion of workingDir then it
   * is returned; otherwise null is returned.
*/
protected Path getInitialWorkingDirectory() {
return null;
}
/**
* Call {@link #mkdirs(Path, FsPermission)} with default permission.
*/
public boolean mkdirs(Path f) throws IOException {
return mkdirs(f, FsPermission.getDirDefault());
}
/**
* Make the given file and all non-existent parents into
* directories. Has the semantics of Unix 'mkdir -p'.
* Existence of the directory hierarchy is not an error.
* @param f path to create
* @param permission to apply to f
*/
public abstract boolean mkdirs(Path f, FsPermission permission
) throws IOException;
/**
* The src file is on the local disk. Add it to FS at
   * the given dst name; the source is kept intact afterwards.
* @param src path
* @param dst path
*/
public void copyFromLocalFile(Path src, Path dst)
throws IOException {
copyFromLocalFile(false, src, dst);
}
/**
   * The src files are on the local disk. Add them to FS at
   * the given dst name, removing the sources afterwards.
* @param srcs path
* @param dst path
*/
public void moveFromLocalFile(Path[] srcs, Path dst)
throws IOException {
copyFromLocalFile(true, true, srcs, dst);
}
/**
* The src file is on the local disk. Add it to FS at
* the given dst name, removing the source afterwards.
* @param src path
* @param dst path
*/
public void moveFromLocalFile(Path src, Path dst)
throws IOException {
copyFromLocalFile(true, src, dst);
}
/**
* The src file is on the local disk. Add it to FS at
* the given dst name.
* delSrc indicates if the source should be removed
* @param delSrc whether to delete the src
* @param src path
* @param dst path
*/
public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
copyFromLocalFile(delSrc, true, src, dst);
}
/**
   * The src files are on the local disk. Add them to FS at
* the given dst name.
* delSrc indicates if the source should be removed
* @param delSrc whether to delete the src
* @param overwrite whether to overwrite an existing file
* @param srcs array of paths which are source
* @param dst path
*/
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
Path[] srcs, Path dst)
throws IOException {
Configuration conf = getConf();
FileUtil.copy(getLocal(conf), srcs, this, dst, delSrc, overwrite, conf);
}
/**
* The src file is on the local disk. Add it to FS at
* the given dst name.
* delSrc indicates if the source should be removed
* @param delSrc whether to delete the src
* @param overwrite whether to overwrite an existing file
* @param src path
* @param dst path
*/
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
Path src, Path dst)
throws IOException {
Configuration conf = getConf();
FileUtil.copy(getLocal(conf), src, this, dst, delSrc, overwrite, conf);
}
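  /*
   * Usage sketch (illustrative; the paths are assumptions and fs denotes
   * an instance of this class):
   *
   *   fs.copyFromLocalFile(true,   // delSrc: remove the local copy
   *       true,                    // overwrite an existing destination
   *       new Path("file:///tmp/upload.dat"),
   *       new Path("/ingest/upload.dat"));
   */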
/**
* The src file is under FS, and the dst is on the local disk.
* Copy it from FS control to the local dst name.
* @param src path
* @param dst path
*/
public void copyToLocalFile(Path src, Path dst) throws IOException {
copyToLocalFile(false, src, dst);
}
/**
* The src file is under FS, and the dst is on the local disk.
* Copy it from FS control to the local dst name.
* Remove the source afterwards
* @param src path
* @param dst path
*/
public void moveToLocalFile(Path src, Path dst) throws IOException {
copyToLocalFile(true, src, dst);
}
/**
* The src file is under FS, and the dst is on the local disk.
* Copy it from FS control to the local dst name.
* delSrc indicates if the src will be removed or not.
* @param delSrc whether to delete the src
* @param src path
* @param dst path
*/
public void copyToLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
copyToLocalFile(delSrc, src, dst, false);
}
/**
* The src file is under FS, and the dst is on the local disk. Copy it from FS
* control to the local dst name. delSrc indicates if the src will be removed
   * or not. useRawLocalFileSystem indicates whether to use RawLocalFileSystem
   * as the local file system or not. RawLocalFileSystem is a non-CRC file
   * system, so it will not create any CRC files locally.
*
* @param delSrc
* whether to delete the src
* @param src
* path
* @param dst
* path
* @param useRawLocalFileSystem
* whether to use RawLocalFileSystem as local file system or not.
*
* @throws IOException
* - if any IO error
*/
public void copyToLocalFile(boolean delSrc, Path src, Path dst,
boolean useRawLocalFileSystem) throws IOException {
Configuration conf = getConf();
FileSystem local = null;
if (useRawLocalFileSystem) {
local = getLocal(conf).getRawFileSystem();
} else {
local = getLocal(conf);
}
FileUtil.copy(this, src, local, dst, delSrc, conf);
}
/**
* Returns a local File that the user can write output to. The caller
* provides both the eventual FS target name and the local working
* file. If the FS is local, we write directly into the target. If
* the FS is remote, we write into the tmp local area.
* @param fsOutputFile path of output file
* @param tmpLocalFile path of local tmp file
*/
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
return tmpLocalFile;
}
/**
* Called when we're all done writing to the target. A local FS will
* do nothing, because we've written to exactly the right place. A remote
* FS will copy the contents of tmpLocalFile to the correct target at
* fsOutputFile.
* @param fsOutputFile path of output file
* @param tmpLocalFile path to local tmp file
*/
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
moveFromLocalFile(tmpLocalFile, fsOutputFile);
}
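  /*
   * Usage sketch of the two-phase local-output protocol above (the paths
   * are assumptions and fs denotes an instance of this class):
   *
   *   Path target = new Path("/results/out.bin");
   *   Path tmp = new Path("file:///tmp/out.bin");
   *   Path writeTo = fs.startLocalOutput(target, tmp);
   *   // ... write the data to writeTo using local file APIs ...
   *   fs.completeLocalOutput(target, tmp);  // no-op for a local FS,
   *                                         // a copy for a remote FS
   */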
/**
* No more filesystem operations are needed. Will
* release any held locks.
*/
@Override
public void close() throws IOException {
// delete all files that were marked as delete-on-exit.
processDeleteOnExit();
CACHE.remove(this.key, this);
}
/** Return the total size of all files in the filesystem.*/
public long getUsed() throws IOException{
long used = 0;
RemoteIterator<LocatedFileStatus> files = listFiles(new Path("/"), true);
while (files.hasNext()) {
used += files.next().getLen();
}
return used;
}
  /**
   * Get the block size for a particular file.
   * @param f the filename
   * @return the number of bytes in a block
   * @deprecated Use getFileStatus() instead
   */
@Deprecated
public long getBlockSize(Path f) throws IOException {
return getFileStatus(f).getBlockSize();
}
/**
   * Return the number of bytes that large input files should optimally
   * be split into to minimize I/O time.
* @deprecated use {@link #getDefaultBlockSize(Path)} instead
*/
@Deprecated
public long getDefaultBlockSize() {
// default to 32MB: large enough to minimize the impact of seeks
return getConf().getLong("fs.local.block.size", 32 * 1024 * 1024);
}
  /** Return the number of bytes that large input files should optimally
   * be split into to minimize I/O time. The given path will be used to
* locate the actual filesystem. The full path does not have to exist.
* @param f path of file
* @return the default block size for the path's filesystem
*/
public long getDefaultBlockSize(Path f) {
return getDefaultBlockSize();
}
/**
* Get the default replication.
* @deprecated use {@link #getDefaultReplication(Path)} instead
*/
@Deprecated
public short getDefaultReplication() { return 1; }
/**
* Get the default replication for a path. The given path will be used to
* locate the actual filesystem. The full path does not have to exist.
* @param path of the file
* @return default replication for the path's filesystem
*/
public short getDefaultReplication(Path path) {
return getDefaultReplication();
}
/**
* Return a file status object that represents the path.
* @param f The path we want information from
* @return a FileStatus object
* @throws FileNotFoundException when the path does not exist;
* IOException see specific implementation
*/
public abstract FileStatus getFileStatus(Path f) throws IOException;
/**
* Checks if the user can access a path. The mode specifies which access
* checks to perform. If the requested permissions are granted, then the
* method returns normally. If access is denied, then the method throws an
* {@link AccessControlException}.
* <p/>
* The default implementation of this method calls {@link #getFileStatus(Path)}
* and checks the returned permissions against the requested permissions.
* Note that the getFileStatus call will be subject to authorization checks.
* Typically, this requires search (execute) permissions on each directory in
* the path's prefix, but this is implementation-defined. Any file system
* that provides a richer authorization model (such as ACLs) may override the
* default implementation so that it checks against that model instead.
* <p>
* In general, applications should avoid using this method, due to the risk of
* time-of-check/time-of-use race conditions. The permissions on a file may
* change immediately after the access call returns. Most applications should
* prefer running specific file system actions as the desired user represented
* by a {@link UserGroupInformation}.
*
* @param path Path to check
* @param mode type of access to check
* @throws AccessControlException if access is denied
* @throws FileNotFoundException if the path does not exist
* @throws IOException see specific implementation
*/
@InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, IOException {
checkAccessPermissions(this.getFileStatus(path), mode);
}
/**
* This method provides the default implementation of
* {@link #access(Path, FsAction)}.
*
* @param stat FileStatus to check
* @param mode type of access to check
* @throws IOException for any error
*/
@InterfaceAudience.Private
static void checkAccessPermissions(FileStatus stat, FsAction mode)
throws IOException {
FsPermission perm = stat.getPermission();
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
String user = ugi.getShortUserName();
List<String> groups = Arrays.asList(ugi.getGroupNames());
if (user.equals(stat.getOwner())) {
if (perm.getUserAction().implies(mode)) {
return;
}
} else if (groups.contains(stat.getGroup())) {
if (perm.getGroupAction().implies(mode)) {
return;
}
} else {
if (perm.getOtherAction().implies(mode)) {
return;
}
}
throw new AccessControlException(String.format(
"Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(),
stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
}
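  /*
   * Usage sketch (illustrative; the path is an assumption and fs denotes
   * an instance of this class):
   *
   *   try {
   *     fs.access(new Path("/secure/data"), FsAction.READ_WRITE);
   *     // access would currently be permitted
   *   } catch (AccessControlException ace) {
   *     // access denied; note the time-of-check/time-of-use caveat above
   *   }
   */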
/**
* See {@link FileContext#fixRelativePart}
*/
protected Path fixRelativePart(Path p) {
if (p.isUriPathAbsolute()) {
return p;
} else {
return new Path(getWorkingDirectory(), p);
}
}
/**
* See {@link FileContext#createSymlink(Path, Path, boolean)}
*/
public void createSymlink(final Path target, final Path link,
final boolean createParent) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnsupportedFileSystemException,
IOException {
// Supporting filesystems should override this method
throw new UnsupportedOperationException(
"Filesystem does not support symlinks!");
}
/**
* See {@link FileContext#getFileLinkStatus(Path)}
*/
public FileStatus getFileLinkStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
// Supporting filesystems should override this method
return getFileStatus(f);
}
/**
* See {@link AbstractFileSystem#supportsSymlinks()}
*/
public boolean supportsSymlinks() {
return false;
}
/**
* See {@link FileContext#getLinkTarget(Path)}
*/
public Path getLinkTarget(Path f) throws IOException {
// Supporting filesystems should override this method
throw new UnsupportedOperationException(
"Filesystem does not support symlinks!");
}
/**
* See {@link AbstractFileSystem#getLinkTarget(Path)}
*/
protected Path resolveLink(Path f) throws IOException {
// Supporting filesystems should override this method
throw new UnsupportedOperationException(
"Filesystem does not support symlinks!");
}
/**
* Get the checksum of a file.
*
* @param f The file path
* @return The file checksum. The default return value is null,
* which indicates that no checksum algorithm is implemented
* in the corresponding FileSystem.
*/
public FileChecksum getFileChecksum(Path f) throws IOException {
return getFileChecksum(f, Long.MAX_VALUE);
}
/**
* Get the checksum of a file, from the beginning of the file till the
* specific length.
* @param f The file path
* @param length The length of the file range for checksum calculation
* @return The file checksum.
*/
public FileChecksum getFileChecksum(Path f, final long length)
throws IOException {
return null;
}
/**
* Set the verify checksum flag. This is only applicable if the
* corresponding FileSystem supports checksum. By default doesn't do anything.
   * @param verifyChecksum whether checksum verification is enabled
*/
public void setVerifyChecksum(boolean verifyChecksum) {
//doesn't do anything
}
/**
* Set the write checksum flag. This is only applicable if the
* corresponding FileSystem supports checksum. By default doesn't do anything.
   * @param writeChecksum whether checksum writing is enabled
*/
public void setWriteChecksum(boolean writeChecksum) {
//doesn't do anything
}
/**
* Returns a status object describing the use and capacity of the
* file system. If the file system has multiple partitions, the
* use and capacity of the root partition is reflected.
*
* @return a FsStatus object
* @throws IOException
* see specific implementation
*/
public FsStatus getStatus() throws IOException {
return getStatus(null);
}
/**
* Returns a status object describing the use and capacity of the
* file system. If the file system has multiple partitions, the
* use and capacity of the partition pointed to by the specified
* path is reflected.
* @param p Path for which status should be obtained. null means
* the default partition.
* @return a FsStatus object
* @throws IOException
* see specific implementation
*/
public FsStatus getStatus(Path p) throws IOException {
return new FsStatus(Long.MAX_VALUE, 0, Long.MAX_VALUE);
}
/**
* Set permission of a path.
   * @param p the path to change
   * @param permission the permission to set on the path
*/
public void setPermission(Path p, FsPermission permission
) throws IOException {
}
/**
* Set owner of a path (i.e. a file or a directory).
* The parameters username and groupname cannot both be null.
* @param p The path
* @param username If it is null, the original username remains unchanged.
* @param groupname If it is null, the original groupname remains unchanged.
*/
public void setOwner(Path p, String username, String groupname
) throws IOException {
}
/**
   * Set the modification and access times of a file.
* @param p The path
* @param mtime Set the modification time of this file.
* The number of milliseconds since Jan 1, 1970.
* A value of -1 means that this call should not set modification time.
* @param atime Set the access time of this file.
* The number of milliseconds since Jan 1, 1970.
* A value of -1 means that this call should not set access time.
*/
public void setTimes(Path p, long mtime, long atime
) throws IOException {
}
/**
* Create a snapshot with a default name.
* @param path The directory where snapshots will be taken.
* @return the snapshot path.
*/
public final Path createSnapshot(Path path) throws IOException {
return createSnapshot(path, null);
}
/**
* Create a snapshot
* @param path The directory where snapshots will be taken.
* @param snapshotName The name of the snapshot
* @return the snapshot path.
*/
public Path createSnapshot(Path path, String snapshotName)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support createSnapshot");
}
/**
* Rename a snapshot
* @param path The directory path where the snapshot was taken
* @param snapshotOldName Old name of the snapshot
* @param snapshotNewName New name of the snapshot
* @throws IOException
*/
public void renameSnapshot(Path path, String snapshotOldName,
String snapshotNewName) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support renameSnapshot");
}
/**
* Delete a snapshot of a directory
* @param path The directory that the to-be-deleted snapshot belongs to
* @param snapshotName The name of the snapshot
*/
public void deleteSnapshot(Path path, String snapshotName)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support deleteSnapshot");
}
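  /*
   * Usage sketch of the snapshot lifecycle (illustrative; the directory
   * and snapshot names are assumptions, and on filesystems such as HDFS
   * the directory must already be snapshottable):
   *
   *   Path dir = new Path("/data/tables/users");
   *   fs.createSnapshot(dir, "before-compaction");
   *   fs.renameSnapshot(dir, "before-compaction", "keep-2015-06");
   *   fs.deleteSnapshot(dir, "keep-2015-06");
   */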
/**
* Modifies ACL entries of files and directories. This method can add new ACL
* entries or modify the permissions on existing ACL entries. All existing
* ACL entries that are not specified in this call are retained without
* changes. (Modifications are merged into the current ACL.)
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications
* @throws IOException if an ACL could not be modified
*/
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support modifyAclEntries");
}
/**
* Removes ACL entries from files and directories. Other ACL entries are
* retained.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing entries to remove
* @throws IOException if an ACL could not be modified
*/
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeAclEntries");
}
/**
* Removes all default ACL entries from files and directories.
*
* @param path Path to modify
* @throws IOException if an ACL could not be modified
*/
public void removeDefaultAcl(Path path)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeDefaultAcl");
}
/**
* Removes all but the base ACL entries of files and directories. The entries
* for user, group, and others are retained for compatibility with permission
* bits.
*
* @param path Path to modify
* @throws IOException if an ACL could not be removed
*/
public void removeAcl(Path path)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeAcl");
}
/**
* Fully replaces ACL of files and directories, discarding all existing
* entries.
*
* @param path Path to modify
* @param aclSpec List<AclEntry> describing modifications, must include entries
* for user, group, and others for compatibility with permission bits.
* @throws IOException if an ACL could not be modified
*/
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support setAcl");
}
/**
* Gets the ACL of a file or directory.
*
* @param path Path to get
* @return AclStatus describing the ACL of the file or directory
* @throws IOException if an ACL could not be read
*/
public AclStatus getAclStatus(Path path) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getAclStatus");
}
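  /*
   * Usage sketch (illustrative; "analyst" is a made-up user and path/fs
   * denote a Path and FileSystem in scope):
   *
   *   AclEntry grant = new AclEntry.Builder()
   *       .setType(AclEntryType.USER).setName("analyst")
   *       .setPermission(FsAction.READ_EXECUTE).build();
   *   fs.modifyAclEntries(path, Arrays.asList(grant));
   *   AclStatus status = fs.getAclStatus(path);  // reflects the merged ACL
   */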
/**
* Set an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
* @param name xattr name.
* @param value xattr value.
* @throws IOException
*/
public void setXAttr(Path path, String name, byte[] value)
throws IOException {
setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
}
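  /*
   * Usage sketch (illustrative; the attribute name and value are
   * assumptions, and fs/p denote a FileSystem and Path in scope):
   *
   *   fs.setXAttr(p, "user.origin",
   *       "etl-job-42".getBytes(StandardCharsets.UTF_8));
   *   byte[] origin = fs.getXAttr(p, "user.origin");
   */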
/**
* Set an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to modify
* @param name xattr name.
* @param value xattr value.
* @param flag xattr set flag
* @throws IOException
*/
public void setXAttr(Path path, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support setXAttr");
}
/**
* Get an xattr name and value for a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attribute
* @param name xattr name.
* @return byte[] xattr value.
* @throws IOException
*/
public byte[] getXAttr(Path path, String name) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getXAttr");
}
/**
* Get all of the xattr name/value pairs for a file or directory.
* Only those xattrs which the logged-in user has permissions to view
* are returned.
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return Map<String, byte[]> describing the XAttrs of the file or directory
* @throws IOException
*/
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getXAttrs");
}
/**
* Get all of the xattrs name/value pairs for a file or directory.
* Only those xattrs which the logged-in user has permissions to view
* are returned.
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @param names XAttr names.
* @return Map<String, byte[]> describing the XAttrs of the file or directory
* @throws IOException
*/
public Map<String, byte[]> getXAttrs(Path path, List<String> names)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getXAttrs");
}
/**
* Get all of the xattr names for a file or directory.
* Only those xattr names which the logged-in user has permissions to view
* are returned.
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to get extended attributes
* @return List<String> of the XAttr names of the file or directory
* @throws IOException
*/
public List<String> listXAttrs(Path path) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support listXAttrs");
}
/**
* Remove an xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
* <p/>
* Refer to the HDFS extended attributes user documentation for details.
*
* @param path Path to remove extended attribute
* @param name xattr name
* @throws IOException
*/
public void removeXAttr(Path path, String name) throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support removeXAttr");
}
/**
* Set the storage policy for a given file or directory.
*
* @param src file or directory path.
* @param policyName the name of the target storage policy. The list
* of supported Storage policies can be retrieved
* via {@link #getAllStoragePolicies}.
* @throws IOException
*/
public void setStoragePolicy(final Path src, final String policyName)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support setStoragePolicy");
}
/**
* Query the effective storage policy ID for the given file or directory.
*
* @param src file or directory path.
   * @return storage policy for the given file.
* @throws IOException
*/
public BlockStoragePolicySpi getStoragePolicy(final Path src)
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getStoragePolicy");
}
/**
* Retrieve all the storage policies supported by this file system.
*
* @return all storage policies supported by this filesystem.
* @throws IOException
*/
public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
throws IOException {
throw new UnsupportedOperationException(getClass().getSimpleName()
+ " doesn't support getAllStoragePolicies");
}
  // made volatile to allow double-checked locking
private volatile static boolean FILE_SYSTEMS_LOADED = false;
private static final Map<String, Class<? extends FileSystem>>
SERVICE_FILE_SYSTEMS = new HashMap<String, Class<? extends FileSystem>>();
private static void loadFileSystems() {
synchronized (FileSystem.class) {
if (!FILE_SYSTEMS_LOADED) {
ServiceLoader<FileSystem> serviceLoader = ServiceLoader.load(FileSystem.class);
for (FileSystem fs : serviceLoader) {
SERVICE_FILE_SYSTEMS.put(fs.getScheme(), fs.getClass());
}
FILE_SYSTEMS_LOADED = true;
}
}
}
public static Class<? extends FileSystem> getFileSystemClass(String scheme,
Configuration conf) throws IOException {
if (!FILE_SYSTEMS_LOADED) {
loadFileSystems();
}
Class<? extends FileSystem> clazz = null;
if (conf != null) {
clazz = (Class<? extends FileSystem>) conf.getClass("fs." + scheme + ".impl", null);
}
if (clazz == null) {
clazz = SERVICE_FILE_SYSTEMS.get(scheme);
}
if (clazz == null) {
throw new IOException("No FileSystem for scheme: " + scheme);
}
return clazz;
}
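  /*
   * Resolution sketch: an explicit Configuration binding such as
   *
   *   conf.setClass("fs.hdfs.impl", DistributedFileSystem.class,
   *       FileSystem.class);
   *
   * takes precedence over the ServiceLoader registrations gathered by
   * loadFileSystems(); only when neither source knows the scheme is the
   * "No FileSystem for scheme" IOException thrown.
   */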
private static FileSystem createFileSystem(URI uri, Configuration conf
) throws IOException {
TraceScope scope = Trace.startSpan("FileSystem#createFileSystem");
Span span = scope.getSpan();
if (span != null) {
span.addKVAnnotation("scheme", uri.getScheme());
}
try {
Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf);
fs.initialize(uri, conf);
return fs;
} finally {
scope.close();
}
}
/** Caching FileSystem objects */
static class Cache {
private final ClientFinalizer clientFinalizer = new ClientFinalizer();
private final Map<Key, FileSystem> map = new HashMap<Key, FileSystem>();
private final Set<Key> toAutoClose = new HashSet<Key>();
/** A variable that makes all objects in the cache unique */
private static AtomicLong unique = new AtomicLong(1);
FileSystem get(URI uri, Configuration conf) throws IOException{
Key key = new Key(uri, conf);
return getInternal(uri, conf, key);
}
/** The objects inserted into the cache using this method are all unique */
FileSystem getUnique(URI uri, Configuration conf) throws IOException{
Key key = new Key(uri, conf, unique.getAndIncrement());
return getInternal(uri, conf, key);
}
private FileSystem getInternal(URI uri, Configuration conf, Key key) throws IOException{
FileSystem fs;
synchronized (this) {
fs = map.get(key);
}
if (fs != null) {
return fs;
}
fs = createFileSystem(uri, conf);
    synchronized (this) { // reacquire the lock
FileSystem oldfs = map.get(key);
      if (oldfs != null) { // a file system was created while the lock was released
fs.close(); // close the new file system
return oldfs; // return the old file system
}
// now insert the new file system into the map
if (map.isEmpty()
&& !ShutdownHookManager.get().isShutdownInProgress()) {
ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY);
}
fs.key = key;
map.put(key, fs);
if (conf.getBoolean("fs.automatic.close", true)) {
toAutoClose.add(key);
}
return fs;
}
}
synchronized void remove(Key key, FileSystem fs) {
FileSystem cachedFs = map.remove(key);
if (fs == cachedFs) {
toAutoClose.remove(key);
} else if (cachedFs != null) {
map.put(key, cachedFs);
}
}
synchronized void closeAll() throws IOException {
closeAll(false);
}
/**
* Close all FileSystem instances in the Cache.
* @param onlyAutomatic only close those that are marked for automatic closing
*/
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
List<IOException> exceptions = new ArrayList<IOException>();
// Make a copy of the keys in the map since we'll be modifying
// the map while iterating over it, which isn't safe.
List<Key> keys = new ArrayList<Key>();
keys.addAll(map.keySet());
for (Key key : keys) {
final FileSystem fs = map.get(key);
if (onlyAutomatic && !toAutoClose.contains(key)) {
continue;
}
//remove from cache
map.remove(key);
toAutoClose.remove(key);
if (fs != null) {
try {
fs.close();
}
catch(IOException ioe) {
exceptions.add(ioe);
}
}
}
if (!exceptions.isEmpty()) {
throw MultipleIOException.createIOException(exceptions);
}
}
private class ClientFinalizer implements Runnable {
@Override
public synchronized void run() {
try {
closeAll(true);
} catch (IOException e) {
LOG.info("FileSystem.Cache.closeAll() threw an exception:\n" + e);
}
}
}
synchronized void closeAll(UserGroupInformation ugi) throws IOException {
List<FileSystem> targetFSList = new ArrayList<FileSystem>();
//Make a pass over the list and collect the filesystems to close
//we cannot close inline since close() removes the entry from the Map
for (Map.Entry<Key, FileSystem> entry : map.entrySet()) {
final Key key = entry.getKey();
final FileSystem fs = entry.getValue();
if (ugi.equals(key.ugi) && fs != null) {
targetFSList.add(fs);
}
}
List<IOException> exceptions = new ArrayList<IOException>();
//now make a pass over the target list and close each
for (FileSystem fs : targetFSList) {
try {
fs.close();
}
catch(IOException ioe) {
exceptions.add(ioe);
}
}
if (!exceptions.isEmpty()) {
throw MultipleIOException.createIOException(exceptions);
}
}
/** FileSystem.Cache.Key */
static class Key {
final String scheme;
final String authority;
final UserGroupInformation ugi;
final long unique; // an artificial way to make a key unique
Key(URI uri, Configuration conf) throws IOException {
this(uri, conf, 0);
}
Key(URI uri, Configuration conf, long unique) throws IOException {
scheme = uri.getScheme()==null ?
"" : StringUtils.toLowerCase(uri.getScheme());
authority = uri.getAuthority()==null ?
"" : StringUtils.toLowerCase(uri.getAuthority());
this.unique = unique;
this.ugi = UserGroupInformation.getCurrentUser();
}
@Override
public int hashCode() {
return (scheme + authority).hashCode() + ugi.hashCode() + (int)unique;
}
static boolean isEqual(Object a, Object b) {
return a == b || (a != null && a.equals(b));
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj != null && obj instanceof Key) {
Key that = (Key)obj;
return isEqual(this.scheme, that.scheme)
&& isEqual(this.authority, that.authority)
&& isEqual(this.ugi, that.ugi)
&& (this.unique == that.unique);
}
return false;
}
@Override
public String toString() {
return "("+ugi.toString() + ")@" + scheme + "://" + authority;
}
}
}
/**
* Tracks statistics about how many reads, writes, and so forth have been
* done in a FileSystem.
*
* Since there is only one of these objects per FileSystem, there will
* typically be many threads writing to this object. Almost every operation
* on an open file will involve a write to this object. In contrast, reading
* statistics is done infrequently by most programs, and not at all by others.
* Hence, this is optimized for writes.
*
* Each thread writes to its own thread-local area of memory. This removes
* contention and allows us to scale up to many, many threads. To read
* statistics, the reader thread totals up the contents of all of the
* thread-local data areas.
*/
public static final class Statistics {
/**
* Statistics data.
*
* There is only a single writer to thread-local StatisticsData objects.
     * Hence, volatile is adequate here; we do not need AtomicLong or similar
* to prevent lost updates.
* The Java specification guarantees that updates to volatile longs will
* be perceived as atomic with respect to other threads, which is all we
* need.
*/
public static class StatisticsData {
volatile long bytesRead;
volatile long bytesWritten;
volatile int readOps;
volatile int largeReadOps;
volatile int writeOps;
/**
* Add another StatisticsData object to this one.
*/
void add(StatisticsData other) {
this.bytesRead += other.bytesRead;
this.bytesWritten += other.bytesWritten;
this.readOps += other.readOps;
this.largeReadOps += other.largeReadOps;
this.writeOps += other.writeOps;
}
/**
* Negate the values of all statistics.
*/
void negate() {
this.bytesRead = -this.bytesRead;
this.bytesWritten = -this.bytesWritten;
this.readOps = -this.readOps;
this.largeReadOps = -this.largeReadOps;
this.writeOps = -this.writeOps;
}
@Override
public String toString() {
return bytesRead + " bytes read, " + bytesWritten + " bytes written, "
+ readOps + " read ops, " + largeReadOps + " large read ops, "
+ writeOps + " write ops";
}
public long getBytesRead() {
return bytesRead;
}
public long getBytesWritten() {
return bytesWritten;
}
public int getReadOps() {
return readOps;
}
public int getLargeReadOps() {
return largeReadOps;
}
public int getWriteOps() {
return writeOps;
}
}
private interface StatisticsAggregator<T> {
void accept(StatisticsData data);
T aggregate();
}
private final String scheme;
/**
* rootData is data that doesn't belong to any thread, but will be added
* to the totals. This is useful for making copies of Statistics objects,
* and for storing data that pertains to threads that have been garbage
* collected. Protected by the Statistics lock.
*/
private final StatisticsData rootData;
/**
* Thread-local data.
*/
private final ThreadLocal<StatisticsData> threadData;
/**
* Set of all thread-local data areas. Protected by the Statistics lock.
* The references to the statistics data are kept using phantom references
* to the associated threads. Proper clean-up is performed by the cleaner
* thread when the threads are garbage collected.
*/
private final Set<StatisticsDataReference> allData;
/**
* Global reference queue and a cleaner thread that manage statistics data
* references from all filesystem instances.
*/
private static final ReferenceQueue<Thread> STATS_DATA_REF_QUEUE;
private static final Thread STATS_DATA_CLEANER;
static {
STATS_DATA_REF_QUEUE = new ReferenceQueue<Thread>();
// start a single daemon cleaner thread
STATS_DATA_CLEANER = new Thread(new StatisticsDataReferenceCleaner());
STATS_DATA_CLEANER.
setName(StatisticsDataReferenceCleaner.class.getName());
STATS_DATA_CLEANER.setDaemon(true);
STATS_DATA_CLEANER.start();
}
public Statistics(String scheme) {
this.scheme = scheme;
this.rootData = new StatisticsData();
this.threadData = new ThreadLocal<StatisticsData>();
this.allData = new HashSet<StatisticsDataReference>();
}
/**
* Copy constructor.
*
* @param other The input Statistics object which is cloned.
*/
public Statistics(Statistics other) {
this.scheme = other.scheme;
this.rootData = new StatisticsData();
other.visitAll(new StatisticsAggregator<Void>() {
@Override
public void accept(StatisticsData data) {
rootData.add(data);
}
public Void aggregate() {
return null;
}
});
this.threadData = new ThreadLocal<StatisticsData>();
this.allData = new HashSet<StatisticsDataReference>();
}
/**
* A phantom reference to a thread that also includes the data associated
* with that thread. On the thread being garbage collected, it is enqueued
* to the reference queue for clean-up.
*/
private class StatisticsDataReference extends PhantomReference<Thread> {
private final StatisticsData data;
public StatisticsDataReference(StatisticsData data, Thread thread) {
super(thread, STATS_DATA_REF_QUEUE);
this.data = data;
}
public StatisticsData getData() {
return data;
}
/**
* Performs clean-up action when the associated thread is garbage
* collected.
*/
public void cleanUp() {
// use the statistics lock for safety
synchronized (Statistics.this) {
/*
* If the thread that created this thread-local data no longer exists,
* remove the StatisticsData from our list and fold the values into
* rootData.
*/
rootData.add(data);
allData.remove(this);
}
}
}
/**
* Background action to act on references being removed.
*/
private static class StatisticsDataReferenceCleaner implements Runnable {
@Override
public void run() {
while (true) {
try {
StatisticsDataReference ref =
(StatisticsDataReference)STATS_DATA_REF_QUEUE.remove();
ref.cleanUp();
} catch (Throwable th) {
// the cleaner thread should continue to run even if there are
// exceptions, including InterruptedException
LOG.warn("exception in the cleaner thread but it will continue to "
+ "run", th);
}
}
}
}
/**
* Get or create the thread-local data associated with the current thread.
*/
public StatisticsData getThreadStatistics() {
StatisticsData data = threadData.get();
if (data == null) {
data = new StatisticsData();
threadData.set(data);
StatisticsDataReference ref =
new StatisticsDataReference(data, Thread.currentThread());
synchronized(this) {
allData.add(ref);
}
}
return data;
}
/**
* Increment the bytes read in the statistics
* @param newBytes the additional bytes read
*/
public void incrementBytesRead(long newBytes) {
getThreadStatistics().bytesRead += newBytes;
}
/**
* Increment the bytes written in the statistics
* @param newBytes the additional bytes written
*/
public void incrementBytesWritten(long newBytes) {
getThreadStatistics().bytesWritten += newBytes;
}
/**
* Increment the number of read operations
* @param count number of read operations
*/
public void incrementReadOps(int count) {
getThreadStatistics().readOps += count;
}
/**
* Increment the number of large read operations
* @param count number of large read operations
*/
public void incrementLargeReadOps(int count) {
getThreadStatistics().largeReadOps += count;
}
/**
* Increment the number of write operations
* @param count number of write operations
*/
public void incrementWriteOps(int count) {
getThreadStatistics().writeOps += count;
}
/**
* Apply the given aggregator to all StatisticsData objects associated with
* this Statistics object.
*
* For each StatisticsData object, we will call accept on the visitor.
* Finally, at the end, we will call aggregate to get the final total.
*
     * @param visitor The visitor to use.
* @return The total.
*/
private synchronized <T> T visitAll(StatisticsAggregator<T> visitor) {
visitor.accept(rootData);
for (StatisticsDataReference ref: allData) {
StatisticsData data = ref.getData();
visitor.accept(data);
}
return visitor.aggregate();
}
/**
* Get the total number of bytes read
* @return the number of bytes
*/
public long getBytesRead() {
return visitAll(new StatisticsAggregator<Long>() {
private long bytesRead = 0;
@Override
public void accept(StatisticsData data) {
bytesRead += data.bytesRead;
}
public Long aggregate() {
return bytesRead;
}
});
}
/**
* Get the total number of bytes written
* @return the number of bytes
*/
public long getBytesWritten() {
return visitAll(new StatisticsAggregator<Long>() {
private long bytesWritten = 0;
@Override
public void accept(StatisticsData data) {
bytesWritten += data.bytesWritten;
}
public Long aggregate() {
return bytesWritten;
}
});
}
/**
* Get the number of file system read operations such as list files
* @return number of read operations
*/
public int getReadOps() {
return visitAll(new StatisticsAggregator<Integer>() {
private int readOps = 0;
@Override
public void accept(StatisticsData data) {
readOps += data.readOps;
readOps += data.largeReadOps;
}
public Integer aggregate() {
return readOps;
}
});
}
/**
* Get the number of large file system read operations such as list files
* under a large directory
* @return number of large read operations
*/
public int getLargeReadOps() {
return visitAll(new StatisticsAggregator<Integer>() {
private int largeReadOps = 0;
@Override
public void accept(StatisticsData data) {
largeReadOps += data.largeReadOps;
}
      @Override
      public Integer aggregate() {
return largeReadOps;
}
});
}
/**
   * Get the number of file system write operations such as create, append,
   * rename, etc.
* @return number of write operations
*/
public int getWriteOps() {
return visitAll(new StatisticsAggregator<Integer>() {
private int writeOps = 0;
@Override
public void accept(StatisticsData data) {
writeOps += data.writeOps;
}
      @Override
      public Integer aggregate() {
return writeOps;
}
});
}
@Override
public String toString() {
return visitAll(new StatisticsAggregator<String>() {
private StatisticsData total = new StatisticsData();
@Override
public void accept(StatisticsData data) {
total.add(data);
}
      @Override
      public String aggregate() {
return total.toString();
}
});
}
/**
* Resets all statistics to 0.
*
* In order to reset, we add up all the thread-local statistics data, and
* set rootData to the negative of that.
*
   * This may seem like a counterintuitive way to reset the statistics. Why
* can't we just zero out all the thread-local data? Well, thread-local
* data can only be modified by the thread that owns it. If we tried to
* modify the thread-local data from this thread, our modification might get
* interleaved with a read-modify-write operation done by the thread that
* owns the data. That would result in our update getting lost.
*
* The approach used here avoids this problem because it only ever reads
* (not writes) the thread-local data. Both reads and writes to rootData
* are done under the lock, so we're free to modify rootData from any thread
* that holds the lock.
*/
public void reset() {
visitAll(new StatisticsAggregator<Void>() {
private StatisticsData total = new StatisticsData();
@Override
public void accept(StatisticsData data) {
total.add(data);
}
      @Override
      public Void aggregate() {
total.negate();
rootData.add(total);
return null;
}
});
}
/**
* Get the uri scheme associated with this statistics object.
   * @return the scheme associated with this set of statistics
*/
public String getScheme() {
return scheme;
}
@VisibleForTesting
synchronized int getAllThreadLocalDataSize() {
return allData.size();
}
}
/**
   * Get the Map of Statistics objects indexed by URI scheme.
   * @return a Map with URI scheme as the key and the corresponding Statistics
   * object as the value
* @deprecated use {@link #getAllStatistics} instead
*/
@Deprecated
public static synchronized Map<String, Statistics> getStatistics() {
Map<String, Statistics> result = new HashMap<String, Statistics>();
for(Statistics stat: statisticsTable.values()) {
result.put(stat.getScheme(), stat);
}
return result;
}
/**
   * Return the Statistics objects for all FileSystem classes being tracked.
   * @return a list of Statistics objects
   */
public static synchronized List<Statistics> getAllStatistics() {
return new ArrayList<Statistics>(statisticsTable.values());
}
/**
   * Get the statistics for a particular file system.
   * @param scheme the URI scheme of the file system
   * @param cls the class to lookup
* @return a statistics object
*/
public static synchronized
Statistics getStatistics(String scheme, Class<? extends FileSystem> cls) {
Statistics result = statisticsTable.get(cls);
if (result == null) {
result = new Statistics(scheme);
statisticsTable.put(cls, result);
}
return result;
}
/**
* Reset all statistics for all file systems
*/
public static synchronized void clearStatistics() {
for(Statistics stat: statisticsTable.values()) {
stat.reset();
}
}
/**
* Print all statistics for all file systems
*/
public static synchronized
void printStatistics() throws IOException {
for (Map.Entry<Class<? extends FileSystem>, Statistics> pair:
statisticsTable.entrySet()) {
System.out.println(" FileSystem " + pair.getKey().getName() +
": " + pair.getValue());
}
}
// Symlinks are temporarily disabled - see HADOOP-10020 and HADOOP-10052
private static boolean symlinksEnabled = false;
private static Configuration conf = null;
@VisibleForTesting
public static boolean areSymlinksEnabled() {
return symlinksEnabled;
}
@VisibleForTesting
public static void enableSymlinks() {
symlinksEnabled = true;
}
}
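// A minimal usage sketch of the Statistics API above, assuming LocalFileSystem
// and the "file" scheme as convenient stand-ins; any FileSystem subclass is
// tracked the same way. Not taken from the Hadoop sources.
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;

class FileSystemStatisticsSketch {
  public static void main(String[] args) throws java.io.IOException {
    FileSystem.Statistics stats =
        FileSystem.getStatistics("file", LocalFileSystem.class);
    stats.incrementBytesRead(4096);  // recorded in this thread's StatisticsData
    stats.incrementReadOps(1);
    // The getters merge rootData with every thread's data via visitAll().
    System.out.println("bytesRead=" + stats.getBytesRead());
    FileSystem.printStatistics();    // one line per registered FileSystem class
    stats.reset();                   // negates the current totals into rootData
  }
}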
| 116,427 | 32.973738 | 131 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32CastagnoliFileChecksum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.util.DataChecksum;
/** For CRC32 with the Castagnoli polynomial */
public class MD5MD5CRC32CastagnoliFileChecksum extends MD5MD5CRC32FileChecksum {
/** Same as this(0, 0, null) */
public MD5MD5CRC32CastagnoliFileChecksum() {
this(0, 0, null);
}
  /** Create an MD5MD5CRC32CastagnoliFileChecksum. */
public MD5MD5CRC32CastagnoliFileChecksum(int bytesPerCRC, long crcPerBlock, MD5Hash md5) {
super(bytesPerCRC, crcPerBlock, md5);
}
@Override
public DataChecksum.Type getCrcType() {
    // this variant always reports the Castagnoli polynomial (CRC32C).
return DataChecksum.Type.CRC32C;
}
}
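// A minimal sketch, not from the Hadoop sources: constructing the Castagnoli
// variant and confirming the CRC type it reports. MD5Hash.digest is used only
// to produce a placeholder digest; the block parameters are arbitrary.
import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.util.DataChecksum;

class CastagnoliChecksumSketch {
  public static void main(String[] args) {
    MD5Hash md5 = MD5Hash.digest(new byte[0]);  // placeholder MD5-of-MD5s
    MD5MD5CRC32CastagnoliFileChecksum sum =
        new MD5MD5CRC32CastagnoliFileChecksum(512, 128, md5);
    // The subclass exists solely to report CRC32C instead of plain CRC32.
    System.out.println(sum.getCrcType() == DataChecksum.Type.CRC32C);  // true
  }
}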
| 1,491 | 34.52381 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathAccessDeniedException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
/** EACCES */
public class PathAccessDeniedException extends PathIOException {
static final long serialVersionUID = 0L;
/** @param path for the exception */
public PathAccessDeniedException(String path) {
super(path, "Permission denied");
}
}
| 1,092 | 39.481481 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.*;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Progressable;
/****************************************************************
* A <code>FilterFileSystem</code> contains
* some other file system, which it uses as
* its basic file system, possibly transforming
* the data along the way or providing additional
* functionality. The class <code>FilterFileSystem</code>
* itself simply overrides all methods of
* <code>FileSystem</code> with versions that
* pass all requests to the contained file
* system. Subclasses of <code>FilterFileSystem</code>
* may further override some of these methods
* and may also provide additional methods
* and fields.
*
*****************************************************************/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FilterFileSystem extends FileSystem {
protected FileSystem fs;
protected String swapScheme;
  /*
   * Default (no-arg) constructor, so that extending classes can define it.
   */
public FilterFileSystem() {
}
public FilterFileSystem(FileSystem fs) {
this.fs = fs;
this.statistics = fs.statistics;
}
/**
* Get the raw file system
* @return FileSystem being filtered
*/
public FileSystem getRawFileSystem() {
return fs;
}
/** Called after a new FileSystem instance is constructed.
* @param name a uri whose authority section names the host, port, etc.
* for this FileSystem
* @param conf the configuration
*/
@Override
public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf);
// this is less than ideal, but existing filesystems sometimes neglect
// to initialize the embedded filesystem
if (fs.getConf() == null) {
fs.initialize(name, conf);
}
String scheme = name.getScheme();
if (!scheme.equals(fs.getUri().getScheme())) {
swapScheme = scheme;
}
}
/** Returns a URI whose scheme and authority identify this FileSystem.*/
@Override
public URI getUri() {
return fs.getUri();
}
@Override
protected URI getCanonicalUri() {
return fs.getCanonicalUri();
}
@Override
protected URI canonicalizeUri(URI uri) {
return fs.canonicalizeUri(uri);
}
/** Make sure that a path specifies a FileSystem. */
@Override
public Path makeQualified(Path path) {
Path fqPath = fs.makeQualified(path);
// swap in our scheme if the filtered fs is using a different scheme
if (swapScheme != null) {
try {
// NOTE: should deal with authority, but too much other stuff is broken
fqPath = new Path(
new URI(swapScheme, fqPath.toUri().getSchemeSpecificPart(), null)
);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
}
return fqPath;
}
///////////////////////////////////////////////////////////////
// FileSystem
///////////////////////////////////////////////////////////////
/** Check that a Path belongs to this FileSystem. */
@Override
protected void checkPath(Path path) {
fs.checkPath(path);
}
@Override
public BlockLocation[] getFileBlockLocations(FileStatus file, long start,
long len) throws IOException {
return fs.getFileBlockLocations(file, start, len);
}
@Override
public Path resolvePath(final Path p) throws IOException {
return fs.resolvePath(p);
}
/**
* Opens an FSDataInputStream at the indicated Path.
* @param f the file name to open
* @param bufferSize the size of the buffer to be used.
*/
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
return fs.open(f, bufferSize);
}
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
return fs.append(f, bufferSize, progress);
}
@Override
public void concat(Path f, Path[] psrcs) throws IOException {
fs.concat(f, psrcs);
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return fs.create(f, permission,
overwrite, bufferSize, replication, blockSize, progress);
}
@Override
public FSDataOutputStream create(Path f,
FsPermission permission,
EnumSet<CreateFlag> flags,
int bufferSize,
short replication,
long blockSize,
Progressable progress,
ChecksumOpt checksumOpt) throws IOException {
return fs.create(f, permission,
flags, bufferSize, replication, blockSize, progress, checksumOpt);
}
@Override
protected RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f,
final PathFilter filter)
throws FileNotFoundException, IOException {
return fs.listLocatedStatus(f, filter);
}
@Override
@Deprecated
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
return fs.createNonRecursive(f, permission, flags, bufferSize, replication, blockSize,
progress);
}
/**
* Set replication for an existing file.
*
* @param src file name
* @param replication new replication
* @throws IOException
* @return true if successful;
* false if file does not exist or is a directory
*/
@Override
public boolean setReplication(Path src, short replication) throws IOException {
return fs.setReplication(src, replication);
}
/**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
*/
@Override
public boolean rename(Path src, Path dst) throws IOException {
return fs.rename(src, dst);
}
@Override
public boolean truncate(Path f, final long newLength) throws IOException {
return fs.truncate(f, newLength);
}
  /** Delete a file or directory. */
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
return fs.delete(f, recursive);
}
/** List files in a directory. */
@Override
public FileStatus[] listStatus(Path f) throws IOException {
return fs.listStatus(f);
}
@Override
public RemoteIterator<Path> listCorruptFileBlocks(Path path)
throws IOException {
return fs.listCorruptFileBlocks(path);
}
/** List files and its block locations in a directory. */
@Override
public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f)
throws IOException {
return fs.listLocatedStatus(f);
}
/** Return a remote iterator for listing in a directory */
@Override
public RemoteIterator<FileStatus> listStatusIterator(Path f)
throws IOException {
return fs.listStatusIterator(f);
}
@Override
public Path getHomeDirectory() {
return fs.getHomeDirectory();
}
/**
* Set the current working directory for the given file system. All relative
* paths will be resolved relative to it.
*
   * @param newDir the new working directory
*/
@Override
public void setWorkingDirectory(Path newDir) {
fs.setWorkingDirectory(newDir);
}
/**
* Get the current working directory for the given file system
*
* @return the directory pathname
*/
@Override
public Path getWorkingDirectory() {
return fs.getWorkingDirectory();
}
@Override
protected Path getInitialWorkingDirectory() {
return fs.getInitialWorkingDirectory();
}
@Override
public FsStatus getStatus(Path p) throws IOException {
return fs.getStatus(p);
}
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
return fs.mkdirs(f, permission);
}
/**
* The src file is on the local disk. Add it to FS at
* the given dst name.
* delSrc indicates if the source should be removed
*/
@Override
public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
fs.copyFromLocalFile(delSrc, src, dst);
}
/**
   * The src files are on the local disk. Add them to the FS at
* the given dst name.
* delSrc indicates if the source should be removed
*/
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
Path[] srcs, Path dst)
throws IOException {
fs.copyFromLocalFile(delSrc, overwrite, srcs, dst);
}
/**
* The src file is on the local disk. Add it to FS at
* the given dst name.
* delSrc indicates if the source should be removed
*/
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite,
Path src, Path dst)
throws IOException {
fs.copyFromLocalFile(delSrc, overwrite, src, dst);
}
/**
* The src file is under FS, and the dst is on the local disk.
* Copy it from FS control to the local dst name.
* delSrc indicates if the src will be removed or not.
*/
@Override
public void copyToLocalFile(boolean delSrc, Path src, Path dst)
throws IOException {
fs.copyToLocalFile(delSrc, src, dst);
}
/**
* Returns a local File that the user can write output to. The caller
* provides both the eventual FS target name and the local working
* file. If the FS is local, we write directly into the target. If
* the FS is remote, we write into the tmp local area.
*/
@Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
return fs.startLocalOutput(fsOutputFile, tmpLocalFile);
}
/**
* Called when we're all done writing to the target. A local FS will
* do nothing, because we've written to exactly the right place. A remote
* FS will copy the contents of tmpLocalFile to the correct target at
* fsOutputFile.
*/
@Override
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
throws IOException {
fs.completeLocalOutput(fsOutputFile, tmpLocalFile);
}
  /** Return the total size of all files in the filesystem. */
  @Override
  public long getUsed() throws IOException {
return fs.getUsed();
}
@Override
public long getDefaultBlockSize() {
return fs.getDefaultBlockSize();
}
@Override
public short getDefaultReplication() {
return fs.getDefaultReplication();
}
@Override
public FsServerDefaults getServerDefaults() throws IOException {
return fs.getServerDefaults();
}
// path variants delegate to underlying filesystem
@Override
public long getDefaultBlockSize(Path f) {
return fs.getDefaultBlockSize(f);
}
@Override
public short getDefaultReplication(Path f) {
return fs.getDefaultReplication(f);
}
@Override
public FsServerDefaults getServerDefaults(Path f) throws IOException {
return fs.getServerDefaults(f);
}
/**
* Get file status.
*/
@Override
public FileStatus getFileStatus(Path f) throws IOException {
return fs.getFileStatus(f);
}
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, IOException {
fs.access(path, mode);
}
  @Override
  public void createSymlink(final Path target, final Path link,
final boolean createParent) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
ParentNotDirectoryException, UnsupportedFileSystemException,
IOException {
fs.createSymlink(target, link, createParent);
}
  @Override
  public FileStatus getFileLinkStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnsupportedFileSystemException, IOException {
return fs.getFileLinkStatus(f);
}
  @Override
  public boolean supportsSymlinks() {
return fs.supportsSymlinks();
}
  @Override
  public Path getLinkTarget(Path f) throws IOException {
return fs.getLinkTarget(f);
}
  @Override
  protected Path resolveLink(Path f) throws IOException {
return fs.resolveLink(f);
}
@Override
public FileChecksum getFileChecksum(Path f) throws IOException {
return fs.getFileChecksum(f);
}
@Override
public FileChecksum getFileChecksum(Path f, long length) throws IOException {
return fs.getFileChecksum(f, length);
}
@Override
public void setVerifyChecksum(boolean verifyChecksum) {
fs.setVerifyChecksum(verifyChecksum);
}
@Override
public void setWriteChecksum(boolean writeChecksum) {
fs.setWriteChecksum(writeChecksum);
}
@Override
public Configuration getConf() {
return fs.getConf();
}
@Override
public void close() throws IOException {
super.close();
fs.close();
}
@Override
public void setOwner(Path p, String username, String groupname
) throws IOException {
fs.setOwner(p, username, groupname);
}
@Override
public void setTimes(Path p, long mtime, long atime
) throws IOException {
fs.setTimes(p, mtime, atime);
}
@Override
public void setPermission(Path p, FsPermission permission
) throws IOException {
fs.setPermission(p, permission);
}
@Override
protected FSDataOutputStream primitiveCreate(Path f,
FsPermission absolutePermission, EnumSet<CreateFlag> flag,
int bufferSize, short replication, long blockSize,
Progressable progress, ChecksumOpt checksumOpt)
throws IOException {
return fs.primitiveCreate(f, absolutePermission, flag,
bufferSize, replication, blockSize, progress, checksumOpt);
}
@Override
@SuppressWarnings("deprecation")
  protected boolean primitiveMkdir(Path f, FsPermission absolutePermission)
    throws IOException {
    return fs.primitiveMkdir(f, absolutePermission);
}
@Override // FileSystem
public FileSystem[] getChildFileSystems() {
return new FileSystem[]{fs};
}
@Override // FileSystem
public Path createSnapshot(Path path, String snapshotName)
throws IOException {
return fs.createSnapshot(path, snapshotName);
}
@Override // FileSystem
public void renameSnapshot(Path path, String snapshotOldName,
String snapshotNewName) throws IOException {
fs.renameSnapshot(path, snapshotOldName, snapshotNewName);
}
@Override // FileSystem
public void deleteSnapshot(Path path, String snapshotName)
throws IOException {
fs.deleteSnapshot(path, snapshotName);
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
fs.modifyAclEntries(path, aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
fs.removeAclEntries(path, aclSpec);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
fs.removeDefaultAcl(path);
}
@Override
public void removeAcl(Path path) throws IOException {
fs.removeAcl(path);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
fs.setAcl(path, aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
return fs.getAclStatus(path);
}
@Override
public void setXAttr(Path path, String name, byte[] value)
throws IOException {
fs.setXAttr(path, name, value);
}
@Override
public void setXAttr(Path path, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException {
fs.setXAttr(path, name, value, flag);
}
@Override
public byte[] getXAttr(Path path, String name) throws IOException {
return fs.getXAttr(path, name);
}
@Override
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
return fs.getXAttrs(path);
}
@Override
public Map<String, byte[]> getXAttrs(Path path, List<String> names)
throws IOException {
return fs.getXAttrs(path, names);
}
@Override
public List<String> listXAttrs(Path path) throws IOException {
return fs.listXAttrs(path);
}
@Override
public void removeXAttr(Path path, String name) throws IOException {
fs.removeXAttr(path, name);
}
@Override
public void setStoragePolicy(Path src, String policyName)
throws IOException {
fs.setStoragePolicy(src, policyName);
}
@Override
public BlockStoragePolicySpi getStoragePolicy(final Path src)
throws IOException {
return fs.getStoragePolicy(src);
}
@Override
public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
throws IOException {
return fs.getAllStoragePolicies();
}
}
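// A minimal sketch of the decorator pattern described in the class javadoc,
// not from the Hadoop sources: a subclass that forwards everything to the
// wrapped FileSystem but counts open() calls. The class and counter names
// are illustrative.
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;

class CountingFileSystem extends FilterFileSystem {
  private final AtomicLong opens = new AtomicLong();

  CountingFileSystem(FileSystem fs) {
    super(fs);  // every method not overridden here delegates to fs
  }

  @Override
  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
    opens.incrementAndGet();           // add behavior ...
    return super.open(f, bufferSize);  // ... then delegate to the wrapped fs
  }

  long getOpenCount() {
    return opens.get();
  }
}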
| 17,916 | 26.9081 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface PathFilter {
/**
* Tests whether or not the specified abstract pathname should be
* included in a pathname list.
*
* @param path The abstract pathname to be tested
* @return <code>true</code> if and only if <code>pathname</code>
* should be included
*/
boolean accept(Path path);
}
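// A small sketch, not from the Hadoop sources: PathFilter has a single
// abstract method, so on Java 8+ it can be written as a lambda. The ".avro"
// suffix is illustrative.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

class SuffixFilterSketch {
  public static void main(String[] args) {
    PathFilter avroOnly = path -> path.getName().endsWith(".avro");
    System.out.println(avroOnly.accept(new Path("/data/part-0.avro")));  // true
    System.out.println(avroOnly.accept(new Path("/data/_SUCCESS")));     // false
  }
}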
| 1,349 | 34.526316 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ReadOption.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Options that can be used when reading from a FileSystem.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public enum ReadOption {
/**
* Skip checksums when reading. This option may be useful when reading a file
* format that has built-in checksums, or for testing purposes.
*/
SKIP_CHECKSUMS,
}
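// A hedged sketch of where ReadOption is consumed; not from the Hadoop
// sources. FSDataInputStream's enhanced byte-buffer read takes an EnumSet of
// these options. Assumes the underlying stream supports this API (it may
// throw UnsupportedOperationException otherwise); the 1 MB cap is arbitrary.
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ElasticByteBufferPool;

class ReadOptionSketch {
  static void readSkippingChecksums(FSDataInputStream in) throws IOException {
    ElasticByteBufferPool pool = new ElasticByteBufferPool();
    ByteBuffer buf =
        in.read(pool, 1 << 20, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    try {
      if (buf != null) {  // null means EOF
        System.out.println("read " + buf.remaining() + " bytes");
      }
    } finally {
      if (buf != null) {
        in.releaseBuffer(buf);  // buffers from this API must be given back
      }
    }
  }
}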
| 1,283 | 35.685714 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
/**
* Defines the types of supported storage media. The default storage
* medium is assumed to be DISK.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public enum StorageType {
// sorted by the speed of the storage types, from fast to slow
RAM_DISK(true),
SSD(false),
DISK(false),
ARCHIVE(false);
private final boolean isTransient;
public static final StorageType DEFAULT = DISK;
public static final StorageType[] EMPTY_ARRAY = {};
private static final StorageType[] VALUES = values();
StorageType(boolean isTransient) {
this.isTransient = isTransient;
}
public boolean isTransient() {
return isTransient;
}
public boolean supportTypeQuota() {
return !isTransient;
}
public boolean isMovable() {
return !isTransient;
}
public static List<StorageType> asList() {
return Arrays.asList(VALUES);
}
public static List<StorageType> getMovableTypes() {
return getNonTransientTypes();
}
public static List<StorageType> getTypesSupportingQuota() {
return getNonTransientTypes();
}
public static StorageType parseStorageType(int i) {
return VALUES[i];
}
public static StorageType parseStorageType(String s) {
return StorageType.valueOf(StringUtils.toUpperCase(s));
}
private static List<StorageType> getNonTransientTypes() {
List<StorageType> nonTransientTypes = new ArrayList<>();
for (StorageType t : VALUES) {
      if (!t.isTransient) {
nonTransientTypes.add(t);
}
}
return nonTransientTypes;
}
}
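// A minimal sketch exercising the helpers above; not from the Hadoop sources.
// Parsing is case-insensitive via StringUtils.toUpperCase, and transient
// media (RAM_DISK) are excluded from the movable and quota lists.
import org.apache.hadoop.fs.StorageType;

class StorageTypeSketch {
  public static void main(String[] args) {
    System.out.println(StorageType.parseStorageType("ssd"));     // SSD
    System.out.println(StorageType.parseStorageType(0));         // RAM_DISK
    System.out.println(StorageType.getMovableTypes());           // [SSD, DISK, ARCHIVE]
    System.out.println(StorageType.DEFAULT.supportTypeQuota());  // true (DISK)
  }
}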
| 2,631 | 26.705263 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PositionedReadable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** Stream that permits positional reading. */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface PositionedReadable {
/**
   * Read up to the specified number of bytes, from a given
* position within a file, and return the number of bytes read. This does not
* change the current offset of a file, and is thread-safe.
*/
public int read(long position, byte[] buffer, int offset, int length)
throws IOException;
/**
* Read the specified number of bytes, from a given
* position within a file. This does not
* change the current offset of a file, and is thread-safe.
*/
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException;
/**
* Read number of bytes equal to the length of the buffer, from a given
* position within a file. This does not
* change the current offset of a file, and is thread-safe.
*/
public void readFully(long position, byte[] buffer) throws IOException;
}
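// A small sketch, not from the Hadoop sources: positional reads leave the
// stream's current offset untouched, so one shared stream can serve several
// readers. FSDataInputStream implements PositionedReadable, so it works here.
import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;

class PreadSketch {
  static byte[] preadHeader(FSDataInputStream in, int len) throws IOException {
    byte[] header = new byte[len];
    in.readFully(0L, header);  // read at offset 0 without seeking
    // in.getPos() is unchanged by the call above.
    return header;
  }
}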
| 1,972 | 36.942308 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathNotFoundException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
/**
 * Exception corresponding to No such file or directory - ENOENT
*/
public class PathNotFoundException extends PathIOException {
static final long serialVersionUID = 0L;
/** @param path for the exception */
public PathNotFoundException(String path) {
super(path, "No such file or directory");
}
}
| 1,147 | 38.586207 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import org.apache.commons.lang.WordUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.shell.Command;
import org.apache.hadoop.fs.shell.CommandFactory;
import org.apache.hadoop.fs.shell.FsCommand;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.apache.hadoop.tools.TableListing;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.htrace.Sampler;
import org.apache.htrace.SamplerBuilder;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
/** Provide command line access to a FileSystem. */
@InterfaceAudience.Private
public class FsShell extends Configured implements Tool {
static final Log LOG = LogFactory.getLog(FsShell.class);
private static final int MAX_LINE_WIDTH = 80;
private FileSystem fs;
private Trash trash;
protected CommandFactory commandFactory;
private Sampler traceSampler;
private final String usagePrefix =
"Usage: hadoop fs [generic options]";
private SpanReceiverHost spanReceiverHost;
  static final String SHELL_HTRACE_PREFIX = "dfs.shell.htrace.";
/**
* Default ctor with no configuration. Be sure to invoke
* {@link #setConf(Configuration)} with a valid configuration prior
* to running commands.
*/
public FsShell() {
this(null);
}
/**
* Construct a FsShell with the given configuration. Commands can be
* executed via {@link #run(String[])}
* @param conf the hadoop configuration
*/
public FsShell(Configuration conf) {
super(conf);
}
protected FileSystem getFS() throws IOException {
if (fs == null) {
fs = FileSystem.get(getConf());
}
return fs;
}
protected Trash getTrash() throws IOException {
if (this.trash == null) {
this.trash = new Trash(getConf());
}
return this.trash;
}
protected void init() throws IOException {
getConf().setQuietMode(true);
if (commandFactory == null) {
commandFactory = new CommandFactory(getConf());
commandFactory.addObject(new Help(), "-help");
commandFactory.addObject(new Usage(), "-usage");
registerCommands(commandFactory);
}
this.spanReceiverHost =
        SpanReceiverHost.get(getConf(), SHELL_HTRACE_PREFIX);
}
protected void registerCommands(CommandFactory factory) {
// TODO: DFSAdmin subclasses FsShell so need to protect the command
// registration. This class should morph into a base class for
// commands, and then this method can be abstract
if (this.getClass().equals(FsShell.class)) {
factory.registerCommands(FsCommand.class);
}
}
/**
   * Returns the current trash directory for this shell.
   * @return path to the current trash directory
* @throws IOException upon error
*/
public Path getCurrentTrashDir() throws IOException {
return getTrash().getCurrentTrashDir();
}
// NOTE: Usage/Help are inner classes to allow access to outer methods
// that access commandFactory
/**
* Display help for commands with their short usage and long description
*/
protected class Usage extends FsCommand {
public static final String NAME = "usage";
public static final String USAGE = "[cmd ...]";
public static final String DESCRIPTION =
"Displays the usage for given command or all commands if none " +
"is specified.";
@Override
protected void processRawArguments(LinkedList<String> args) {
if (args.isEmpty()) {
printUsage(System.out);
} else {
for (String arg : args) printUsage(System.out, arg);
}
}
}
/**
* Displays short usage of commands sans the long description
*/
protected class Help extends FsCommand {
public static final String NAME = "help";
public static final String USAGE = "[cmd ...]";
public static final String DESCRIPTION =
"Displays help for given command or all commands if none " +
"is specified.";
@Override
protected void processRawArguments(LinkedList<String> args) {
if (args.isEmpty()) {
printHelp(System.out);
} else {
for (String arg : args) printHelp(System.out, arg);
}
}
}
/*
   * The following are helper methods for printInfo(). They are defined
* outside of the scope of the Help/Usage class because the run() method
* needs to invoke them too.
*/
// print all usages
private void printUsage(PrintStream out) {
printInfo(out, null, false);
}
// print one usage
private void printUsage(PrintStream out, String cmd) {
printInfo(out, cmd, false);
}
// print all helps
private void printHelp(PrintStream out) {
printInfo(out, null, true);
}
// print one help
private void printHelp(PrintStream out, String cmd) {
printInfo(out, cmd, true);
}
private void printInfo(PrintStream out, String cmd, boolean showHelp) {
if (cmd != null) {
// display help or usage for one command
Command instance = commandFactory.getInstance("-" + cmd);
if (instance == null) {
throw new UnknownCommandException(cmd);
}
if (showHelp) {
printInstanceHelp(out, instance);
} else {
printInstanceUsage(out, instance);
}
} else {
// display help or usage for all commands
out.println(usagePrefix);
// display list of short usages
ArrayList<Command> instances = new ArrayList<Command>();
for (String name : commandFactory.getNames()) {
Command instance = commandFactory.getInstance(name);
if (!instance.isDeprecated()) {
out.println("\t[" + instance.getUsage() + "]");
instances.add(instance);
}
}
// display long descriptions for each command
if (showHelp) {
for (Command instance : instances) {
out.println();
printInstanceHelp(out, instance);
}
}
out.println();
ToolRunner.printGenericCommandUsage(out);
}
}
private void printInstanceUsage(PrintStream out, Command instance) {
out.println(usagePrefix + " " + instance.getUsage());
}
private void printInstanceHelp(PrintStream out, Command instance) {
out.println(instance.getUsage() + " :");
TableListing listing = null;
final String prefix = " ";
for (String line : instance.getDescription().split("\n")) {
if (line.matches("^[ \t]*[-<].*$")) {
String[] segments = line.split(":");
if (segments.length == 2) {
if (listing == null) {
listing = createOptionTableListing();
}
listing.addRow(segments[0].trim(), segments[1].trim());
continue;
}
}
// Normal literal description.
if (listing != null) {
for (String listingLine : listing.toString().split("\n")) {
out.println(prefix + listingLine);
}
listing = null;
}
for (String descLine : WordUtils.wrap(
line, MAX_LINE_WIDTH, "\n", true).split("\n")) {
out.println(prefix + descLine);
}
}
if (listing != null) {
for (String listingLine : listing.toString().split("\n")) {
out.println(prefix + listingLine);
}
}
}
  // Creates a two-column table: the first column holds the command line
  // option, the second column holds the option description.
private TableListing createOptionTableListing() {
return new TableListing.Builder().addField("").addField("", true)
.wrapWidth(MAX_LINE_WIDTH).build();
}
/**
   * Run the shell against the given command-line arguments.
*/
@Override
public int run(String argv[]) throws Exception {
// initialize FsShell
init();
traceSampler = new SamplerBuilder(TraceUtils.
        wrapHadoopConf(SHELL_HTRACE_PREFIX, getConf())).build();
int exitCode = -1;
if (argv.length < 1) {
printUsage(System.err);
} else {
String cmd = argv[0];
Command instance = null;
try {
instance = commandFactory.getInstance(cmd);
if (instance == null) {
throw new UnknownCommandException();
}
TraceScope scope = Trace.startSpan(instance.getCommandName(), traceSampler);
if (scope.getSpan() != null) {
String args = StringUtils.join(" ", argv);
if (args.length() > 2048) {
args = args.substring(0, 2048);
}
scope.getSpan().addKVAnnotation("args", args);
}
try {
exitCode = instance.run(Arrays.copyOfRange(argv, 1, argv.length));
} finally {
scope.close();
}
} catch (IllegalArgumentException e) {
displayError(cmd, e.getLocalizedMessage());
if (instance != null) {
printInstanceUsage(System.err, instance);
}
} catch (Exception e) {
// instance.run catches IOE, so something is REALLY wrong if here
LOG.debug("Error", e);
displayError(cmd, "Fatal internal error");
e.printStackTrace(System.err);
}
}
return exitCode;
}
private void displayError(String cmd, String message) {
for (String line : message.split("\n")) {
System.err.println(cmd + ": " + line);
if (cmd.charAt(0) != '-') {
Command instance = null;
instance = commandFactory.getInstance("-" + cmd);
if (instance != null) {
System.err.println("Did you mean -" + cmd + "? This command " +
"begins with a dash.");
}
}
}
}
/**
* Performs any necessary cleanup
* @throws IOException upon error
*/
public void close() throws IOException {
if (fs != null) {
fs.close();
fs = null;
}
if (this.spanReceiverHost != null) {
this.spanReceiverHost.closeReceivers();
}
}
/**
* main() has some simple utility methods
* @param argv the command and its arguments
* @throws Exception upon error
*/
public static void main(String argv[]) throws Exception {
FsShell shell = newShellInstance();
Configuration conf = new Configuration();
conf.setQuietMode(false);
shell.setConf(conf);
int res;
try {
res = ToolRunner.run(shell, argv);
} finally {
shell.close();
}
System.exit(res);
}
// TODO: this should be abstract in a base class
protected static FsShell newShellInstance() {
return new FsShell();
}
/**
* The default ctor signals that the command being executed does not exist,
   * while the other ctor signals that a specific command does not exist. The
   * latter is used by commands that process other commands, e.g. -usage/-help
*/
@SuppressWarnings("serial")
static class UnknownCommandException extends IllegalArgumentException {
private final String cmd;
UnknownCommandException() { this(null); }
UnknownCommandException(String cmd) { this.cmd = cmd; }
@Override
public String getMessage() {
return ((cmd != null) ? "`"+cmd+"': " : "") + "Unknown command";
}
}
}
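// A minimal sketch of driving FsShell programmatically, mirroring what main()
// does; not from the Hadoop sources. ToolRunner strips the generic options
// and the remaining arguments select the command. "-ls /" is illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

class FsShellSketch {
  public static void main(String[] args) throws Exception {
    FsShell shell = new FsShell();
    Configuration conf = new Configuration();
    conf.setQuietMode(false);
    shell.setConf(conf);
    try {
      int rc = ToolRunner.run(shell, new String[] {"-ls", "/"});
      System.out.println("exit code: " + rc);
    } finally {
      shell.close();  // closes the cached FileSystem and span receivers
    }
  }
}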
| 12,220 | 29.70603 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Seekable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Stream that permits seeking.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface Seekable {
/**
* Seek to the given offset from the start of the file.
* The next read() will be from that location. Can't
* seek past the end of the file.
*/
void seek(long pos) throws IOException;
/**
* Return the current offset from the start of the file
*/
long getPos() throws IOException;
/**
* Seeks a different copy of the data. Returns true if
* found a new source, false otherwise.
*/
@InterfaceAudience.Private
boolean seekToNewSource(long targetPos) throws IOException;
}
| 1,621 | 31.44 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/****************************************************************
* FSInputStream is a generic old InputStream with a little bit
* of RAF-style seek ability.
*
*****************************************************************/
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Unstable
public abstract class FSInputStream extends InputStream
implements Seekable, PositionedReadable {
/**
* Seek to the given offset from the start of the file.
* The next read() will be from that location. Can't
* seek past the end of the file.
*/
@Override
public abstract void seek(long pos) throws IOException;
/**
* Return the current offset from the start of the file
*/
@Override
public abstract long getPos() throws IOException;
/**
* Seeks a different copy of the data. Returns true if
* found a new source, false otherwise.
*/
@Override
public abstract boolean seekToNewSource(long targetPos) throws IOException;
@Override
public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
synchronized (this) {
long oldPos = getPos();
int nread = -1;
try {
seek(position);
nread = read(buffer, offset, length);
} finally {
seek(oldPos);
}
return nread;
}
}
@Override
public void readFully(long position, byte[] buffer, int offset, int length)
throws IOException {
int nread = 0;
while (nread < length) {
int nbytes = read(position+nread, buffer, offset+nread, length-nread);
if (nbytes < 0) {
throw new EOFException("End of file reached before reading fully.");
}
nread += nbytes;
}
}
@Override
public void readFully(long position, byte[] buffer)
throws IOException {
readFully(position, buffer, 0, buffer.length);
}
}
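// A minimal concrete subclass sketch, not from the Hadoop sources, showing
// what the abstract contract above requires: an in-memory stream over a byte
// array. The inherited read(long, ...) will synchronize, seek, read, and
// seek back using exactly these methods.
import java.io.IOException;
import org.apache.hadoop.fs.FSInputStream;

class ByteArrayFSInputStream extends FSInputStream {
  private final byte[] data;
  private int pos;

  ByteArrayFSInputStream(byte[] data) {
    this.data = data;
  }

  @Override
  public void seek(long target) throws IOException {
    if (target < 0 || target > data.length) {
      throw new IOException("cannot seek to " + target);
    }
    pos = (int) target;
  }

  @Override
  public long getPos() {
    return pos;
  }

  @Override
  public boolean seekToNewSource(long targetPos) {
    return false;  // only one copy of the data exists
  }

  @Override
  public int read() {
    return pos < data.length ? (data[pos++] & 0xff) : -1;
  }
}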
| 2,928 | 30.836957 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.crypto;
import java.io.IOException;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.CryptoOutputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
public class CryptoFSDataOutputStream extends FSDataOutputStream {
private final FSDataOutputStream fsOut;
public CryptoFSDataOutputStream(FSDataOutputStream out, CryptoCodec codec,
int bufferSize, byte[] key, byte[] iv) throws IOException {
super(new CryptoOutputStream(out, codec, bufferSize, key, iv,
out.getPos()), null, out.getPos());
this.fsOut = out;
}
public CryptoFSDataOutputStream(FSDataOutputStream out, CryptoCodec codec,
byte[] key, byte[] iv) throws IOException {
super(new CryptoOutputStream(out, codec, key, iv, out.getPos()),
null, out.getPos());
this.fsOut = out;
}
@Override
public long getPos() throws IOException {
return fsOut.getPos();
}
}
| 1,761 | 35.708333 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.crypto;
import java.io.IOException;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.CryptoInputStream;
import org.apache.hadoop.fs.FSDataInputStream;
public class CryptoFSDataInputStream extends FSDataInputStream {
public CryptoFSDataInputStream(FSDataInputStream in, CryptoCodec codec,
int bufferSize, byte[] key, byte[] iv) throws IOException {
super(new CryptoInputStream(in, codec, bufferSize, key, iv));
}
public CryptoFSDataInputStream(FSDataInputStream in, CryptoCodec codec,
byte[] key, byte[] iv) throws IOException {
super(new CryptoInputStream(in, codec, key, iv));
}
}
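// A hedged round-trip sketch for the two crypto stream wrappers above; not
// from the Hadoop sources. Assumes the default AES/CTR/NoPadding codec, so a
// 16-byte key and 16-byte IV; the all-zero key, IV, and path are placeholders.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.crypto.CryptoFSDataInputStream;
import org.apache.hadoop.fs.crypto.CryptoFSDataOutputStream;

class CryptoStreamSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    CryptoCodec codec = CryptoCodec.getInstance(conf);
    byte[] key = new byte[16];  // placeholder; use a real key in practice
    byte[] iv = new byte[16];   // placeholder IV
    FileSystem fs = FileSystem.getLocal(conf);
    Path p = new Path("/tmp/crypto-sketch.bin");

    try (CryptoFSDataOutputStream out =
             new CryptoFSDataOutputStream(fs.create(p), codec, key, iv)) {
      out.writeUTF("ciphertext on disk, plaintext in the stream");
    }
    try (CryptoFSDataInputStream in =
             new CryptoFSDataInputStream(fs.open(p), codec, key, iv)) {
      System.out.println(in.readUTF());
    }
  }
}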
| 1,487 | 38.157895 | 75 |
java
|