repo (string, 1-191 chars, nullable) | file (string, 23-351 chars) | code (string, 0-5.32M chars) | file_length (int64, 0-5.32M) | avg_line_length (float64, 0-2.9k) | max_line_length (int64, 0-288k) | extension_type (string, 1 class) |
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READLINK3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* READLINK3 Response
*/
public class READLINK3Response extends NFS3Response {
private final Nfs3FileAttributes postOpSymlinkAttr;
private final byte[] path;
public READLINK3Response(int status) {
this(status, new Nfs3FileAttributes(), new byte[0]);
}
public READLINK3Response(int status, Nfs3FileAttributes postOpAttr,
byte[] path) {
super(status);
this.postOpSymlinkAttr = postOpAttr;
this.path = new byte[path.length];
System.arraycopy(path, 0, this.path, 0, path.length);
}
public static READLINK3Response deserialize(XDR xdr) {
int status = xdr.readInt();
xdr.readBoolean();
Nfs3FileAttributes postOpSymlinkAttr = Nfs3FileAttributes.deserialize(xdr);
byte[] path = new byte[0];
if (status == Nfs3Status.NFS3_OK) {
path = xdr.readVariableOpaque();
}
return new READLINK3Response(status, postOpSymlinkAttr, path);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
out.writeBoolean(true); // Attribute follows
postOpSymlinkAttr.serialize(out);
if (getStatus() == Nfs3Status.NFS3_OK) {
out.writeVariableOpaque(path);
}
return out;
}
}
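// --- Illustrative sketch (not part of the original file): hand-builds the
// XDR body of a successful READLINK3 reply and decodes it with deserialize().
// It uses only APIs seen elsewhere in this dump (XDR#getBytes, XDR(byte[]),
// Nfs3FileAttributes#serialize); the class name and path value are made up.
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.READLINK3Response;
import org.apache.hadoop.oncrpc.XDR;

public class Readlink3WireSketch {
  public static void main(String[] args) {
    XDR wire = new XDR();
    wire.writeInt(Nfs3Status.NFS3_OK);        // status
    wire.writeBoolean(true);                  // post-op attributes follow
    new Nfs3FileAttributes().serialize(wire); // placeholder symlink attributes
    wire.writeVariableOpaque("/export/data".getBytes()); // link target
    READLINK3Response reply =
        READLINK3Response.deserialize(new XDR(wire.getBytes()));
    System.out.println("status = " + reply.getStatus());
  }
}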
| 2,259 | 32.731343 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/MKDIR3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* MKDIR3 Response
*/
public class MKDIR3Response extends NFS3Response {
private final FileHandle objFileHandle;
private final Nfs3FileAttributes objAttr;
private final WccData dirWcc;
public MKDIR3Response(int status) {
this(status, null, null, new WccData(null, null));
}
public MKDIR3Response(int status, FileHandle handle, Nfs3FileAttributes attr,
WccData dirWcc) {
super(status);
this.objFileHandle = handle;
this.objAttr = attr;
this.dirWcc = dirWcc;
}
public FileHandle getObjFileHandle() {
return objFileHandle;
}
public Nfs3FileAttributes getObjAttr() {
return objAttr;
}
public WccData getDirWcc() {
return dirWcc;
}
public static MKDIR3Response deserialize(XDR xdr) {
int status = xdr.readInt();
FileHandle objFileHandle = new FileHandle();
Nfs3FileAttributes objAttr = null;
WccData dirWcc;
if (status == Nfs3Status.NFS3_OK) {
xdr.readBoolean();
objFileHandle.deserialize(xdr);
xdr.readBoolean();
objAttr = Nfs3FileAttributes.deserialize(xdr);
}
dirWcc = WccData.deserialize(xdr);
return new MKDIR3Response(status, objFileHandle, objAttr, dirWcc);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
if (getStatus() == Nfs3Status.NFS3_OK) {
out.writeBoolean(true); // Handle follows
objFileHandle.serialize(out);
out.writeBoolean(true); // Attributes follow
objAttr.serialize(out);
}
dirWcc.serialize(out);
return out;
}
}
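// --- Illustrative sketch (not part of the original file): serializing a
// successful MKDIR3 reply. The handle and attributes are written only when
// the status is NFS3_OK, while the directory wcc_data is written on every
// path. VerifierNone stands in for a real verifier; the xid is made up.
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.MKDIR3Response;
import org.apache.hadoop.nfs.nfs3.response.WccData;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone;

public class Mkdir3ReplySketch {
  public static void main(String[] args) {
    MKDIR3Response ok = new MKDIR3Response(Nfs3Status.NFS3_OK,
        new FileHandle(), new Nfs3FileAttributes(), new WccData(null, null));
    XDR out = ok.serialize(new XDR(), 1 /* xid */, new VerifierNone());
    System.out.println("reply is " + out.getBytes().length + " bytes");
  }
}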
| 2,672 | 30.081395 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READ3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import java.nio.ByteBuffer;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* READ3 Response
*/
public class READ3Response extends NFS3Response {
private final Nfs3FileAttributes postOpAttr;
private final int count; // The actual number of bytes of data read
private final boolean eof;
private final ByteBuffer data;
public READ3Response(int status) {
this(status, new Nfs3FileAttributes(), 0, false, null);
}
public READ3Response(int status, Nfs3FileAttributes postOpAttr, int count,
boolean eof, ByteBuffer data) {
super(status);
this.postOpAttr = postOpAttr;
this.count = count;
this.eof = eof;
this.data = data;
}
public Nfs3FileAttributes getPostOpAttr() {
return postOpAttr;
}
public int getCount() {
return count;
}
public boolean isEof() {
return eof;
}
public ByteBuffer getData() {
return data;
}
public static READ3Response deserialize(XDR xdr) {
int status = xdr.readInt();
xdr.readBoolean();
Nfs3FileAttributes postOpAttr = Nfs3FileAttributes.deserialize(xdr);
int count = 0;
boolean eof = false;
byte[] data = new byte[0];
if (status == Nfs3Status.NFS3_OK) {
count = xdr.readInt();
eof = xdr.readBoolean();
int len = xdr.readInt();
assert (len == count);
data = xdr.readFixedOpaque(count);
}
return new READ3Response(status, postOpAttr, count, eof,
ByteBuffer.wrap(data));
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
out.writeBoolean(true); // Attribute follows
postOpAttr.serialize(out);
if (getStatus() == Nfs3Status.NFS3_OK) {
out.writeInt(count);
out.writeBoolean(eof);
out.writeInt(count);
out.writeFixedOpaque(data.array(), count);
}
return out;
}
}
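// --- Illustrative sketch (not part of the original file): the READ3 reply
// body carries the byte count twice, once as the count field and once as the
// length prefix of the fixed opaque data, and deserialize() asserts that the
// two match. All values below are made up.
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.READ3Response;
import org.apache.hadoop.oncrpc.XDR;

public class Read3WireSketch {
  public static void main(String[] args) {
    byte[] payload = {1, 2, 3, 4};
    XDR wire = new XDR();
    wire.writeInt(Nfs3Status.NFS3_OK);        // status
    wire.writeBoolean(true);                  // post-op attributes follow
    new Nfs3FileAttributes().serialize(wire); // placeholder file attributes
    wire.writeInt(payload.length);            // count
    wire.writeBoolean(false);                 // eof
    wire.writeInt(payload.length);            // opaque length (== count)
    wire.writeFixedOpaque(payload, payload.length);
    READ3Response reply = READ3Response.deserialize(new XDR(wire.getBytes()));
    System.out.println("read " + reply.getCount() + " bytes, eof=" + reply.isEof());
  }
}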
| 2,846 | 27.757576 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/RENAME3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* RENAME3 Response
*/
public class RENAME3Response extends NFS3Response {
private final WccData fromDirWcc;
private final WccData toDirWcc;
public RENAME3Response(int status) {
this(status, new WccData(null, null), new WccData(null, null));
}
public RENAME3Response(int status, WccData fromWccData, WccData toWccData) {
super(status);
this.fromDirWcc = fromWccData;
this.toDirWcc = toWccData;
}
public WccData getFromDirWcc() {
return fromDirWcc;
}
public WccData getToDirWcc() {
return toDirWcc;
}
public static RENAME3Response deserialize(XDR xdr) {
int status = xdr.readInt();
WccData fromDirWcc = WccData.deserialize(xdr);
WccData toDirWcc = WccData.deserialize(xdr);
return new RENAME3Response(status, fromDirWcc, toDirWcc);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
fromDirWcc.serialize(out);
toDirWcc.serialize(out);
return out;
}
}
| 1,948 | 30.435484 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/COMMIT3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* COMMIT3 Response
*/
public class COMMIT3Response extends NFS3Response {
private final WccData fileWcc;
private final long verf;
public COMMIT3Response(int status) {
this(status, new WccData(null, null), Nfs3Constant.WRITE_COMMIT_VERF);
}
public COMMIT3Response(int status, WccData fileWcc, long verf) {
super(status);
this.fileWcc = fileWcc;
this.verf = verf;
}
public WccData getFileWcc() {
return fileWcc;
}
public long getVerf() {
return verf;
}
public static COMMIT3Response deserialize(XDR xdr) {
int status = xdr.readInt();
long verf = 0;
WccData fileWcc = WccData.deserialize(xdr);
if (status == Nfs3Status.NFS3_OK) {
verf = xdr.readHyper();
}
return new COMMIT3Response(status, fileWcc, verf);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
fileWcc.serialize(out);
if (getStatus() == Nfs3Status.NFS3_OK) {
out.writeLongAsHyper(verf);
}
return out;
}
}
| 2,084 | 28.785714 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSSTAT3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* FSSTAT3 Response
*/
public class FSSTAT3Response extends NFS3Response {
private Nfs3FileAttributes postOpAttr;
// The total size, in bytes, of the file system.
private final long tbytes;
// The amount of free space, in bytes, in the file system.
private final long fbytes;
/*
* The amount of free space, in bytes, available to the user identified by the
* authentication information in the RPC. (This reflects space that is
* reserved by the file system; it does not reflect any quota system
* implemented by the server.)
*/
private final long abytes;
/*
* The total number of file slots in the file system. (On a UNIX server, this
* often corresponds to the number of inodes configured.)
*/
private final long tfiles;
/* The number of free file slots in the file system. */
private final long ffiles;
/*
* The number of free file slots that are available to the user corresponding
* to the authentication information in the RPC. (This reflects slots that are
* reserved by the file system; it does not reflect any quota system
* implemented by the server.)
*/
private final long afiles;
/*
* A measure of file system volatility: this is the number of seconds for
* which the file system is not expected to change. For a volatile, frequently
* updated file system, this will be 0. For an immutable file system, such as
* a CD-ROM, this would be the largest unsigned integer. For file systems that
* are infrequently modified, for example, one containing local executable
* programs and on-line documentation, a value corresponding to a few hours or
* days might be used. The client may use this as a hint in tuning its cache
* management. Note however, this measure is assumed to be dynamic and may
* change at any time.
*/
private final int invarsec;
public FSSTAT3Response(int status) {
this(status, null, 0, 0, 0, 0, 0, 0, 0);
}
public FSSTAT3Response(int status, Nfs3FileAttributes postOpAttr,
long tbytes, long fbytes, long abytes, long tfiles, long ffiles,
long afiles, int invarsec) {
super(status);
this.postOpAttr = postOpAttr;
this.tbytes = tbytes;
this.fbytes = fbytes;
this.abytes = abytes;
this.tfiles = tfiles;
this.ffiles = ffiles;
this.afiles = afiles;
this.invarsec = invarsec;
}
public static FSSTAT3Response deserialize(XDR xdr) {
int status = xdr.readInt();
xdr.readBoolean();
Nfs3FileAttributes postOpAttr = Nfs3FileAttributes.deserialize(xdr);
long tbytes = 0;
long fbytes = 0;
long abytes = 0;
long tfiles = 0;
long ffiles = 0;
long afiles = 0;
int invarsec = 0;
if (status == Nfs3Status.NFS3_OK) {
tbytes = xdr.readHyper();
fbytes = xdr.readHyper();
abytes = xdr.readHyper();
tfiles = xdr.readHyper();
ffiles = xdr.readHyper();
afiles = xdr.readHyper();
invarsec = xdr.readInt();
}
return new FSSTAT3Response(status, postOpAttr, tbytes, fbytes, abytes,
tfiles, ffiles, afiles, invarsec);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
out.writeBoolean(true);
if (postOpAttr == null) {
postOpAttr = new Nfs3FileAttributes();
}
postOpAttr.serialize(out);
if (getStatus() == Nfs3Status.NFS3_OK) {
out.writeLongAsHyper(tbytes);
out.writeLongAsHyper(fbytes);
out.writeLongAsHyper(abytes);
out.writeLongAsHyper(tfiles);
out.writeLongAsHyper(ffiles);
out.writeLongAsHyper(afiles);
out.writeInt(invarsec);
}
return out;
}
}
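// --- Illustrative sketch (not part of the original file): populating an
// FSSTAT3 reply for a hypothetical 1 TiB file system with 250 GiB free. The
// "a" variants are what this particular user may use, and invarsec is 0 for
// a frequently changing file system. All numbers are made up.
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response;

public class Fsstat3Sketch {
  public static void main(String[] args) {
    FSSTAT3Response reply = new FSSTAT3Response(Nfs3Status.NFS3_OK,
        new Nfs3FileAttributes(),
        1L << 40,      // tbytes: total size of the file system
        250L << 30,    // fbytes: free space
        250L << 30,    // abytes: free space available to this user
        1_000_000L,    // tfiles: total file slots
        900_000L,      // ffiles: free file slots
        900_000L,      // afiles: free slots available to this user
        0);            // invarsec: volatile file system
    System.out.println("status = " + reply.getStatus());
  }
}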
| 4,763 | 32.787234 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/WRITE3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* WRITE3 Response
*/
public class WRITE3Response extends NFS3Response {
private final WccData fileWcc; // return on both success and failure
private final int count;
private final WriteStableHow stableHow;
private final long verifier;
public WRITE3Response(int status) {
this(status, new WccData(null, null), 0, WriteStableHow.UNSTABLE,
Nfs3Constant.WRITE_COMMIT_VERF);
}
public WRITE3Response(int status, WccData fileWcc, int count,
WriteStableHow stableHow, long verifier) {
super(status);
this.fileWcc = fileWcc;
this.count = count;
this.stableHow = stableHow;
this.verifier = verifier;
}
public int getCount() {
return count;
}
public WriteStableHow getStableHow() {
return stableHow;
}
public long getVerifier() {
return verifier;
}
public static WRITE3Response deserialize(XDR xdr) {
int status = xdr.readInt();
WccData fileWcc = WccData.deserialize(xdr);
int count = 0;
WriteStableHow stableHow = null;
long verifier = 0;
if (status == Nfs3Status.NFS3_OK) {
count = xdr.readInt();
int how = xdr.readInt();
stableHow = WriteStableHow.values()[how];
verifier = xdr.readHyper();
}
return new WRITE3Response(status, fileWcc, count, stableHow, verifier);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
fileWcc.serialize(out);
if (getStatus() == Nfs3Status.NFS3_OK) {
out.writeInt(count);
out.writeInt(stableHow.getValue());
out.writeLongAsHyper(this.verifier); // "this." avoids the Verifier parameter shadowing the field
}
return out;
}
}
| 2,830 | 30.10989 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/WccAttr.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.NfsTime;
import org.apache.hadoop.oncrpc.XDR;
/**
* WccAttr saves attributes used for weak cache consistency
*/
public class WccAttr {
long size;
NfsTime mtime; // in milliseconds
NfsTime ctime; // in milliseconds
public long getSize() {
return size;
}
public NfsTime getMtime() {
return mtime;
}
public NfsTime getCtime() {
return ctime;
}
public WccAttr() {
this.size = 0;
mtime = null;
ctime = null;
}
public WccAttr(long size, NfsTime mtime, NfsTime ctime) {
this.size = size;
this.mtime = mtime;
this.ctime = ctime;
}
public static WccAttr deserialize(XDR xdr) {
long size = xdr.readHyper();
NfsTime mtime = NfsTime.deserialize(xdr);
NfsTime ctime = NfsTime.deserialize(xdr);
return new WccAttr(size, mtime, ctime);
}
public void serialize(XDR out) {
out.writeLongAsHyper(size);
if (mtime == null) {
mtime = new NfsTime(0);
}
mtime.serialize(out);
if (ctime == null) {
ctime = new NfsTime(0);
}
ctime.serialize(out);
}
}
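// --- Illustrative sketch (not part of the original file): a WccAttr
// serialize/deserialize round trip. Note that serialize() substitutes
// NfsTime(0) for null times, so even a default WccAttr decodes with non-null
// times. The size and timestamps are made up.
import org.apache.hadoop.nfs.NfsTime;
import org.apache.hadoop.nfs.nfs3.response.WccAttr;
import org.apache.hadoop.oncrpc.XDR;

public class WccAttrSketch {
  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    WccAttr attr = new WccAttr(4096L, new NfsTime(now), new NfsTime(now));
    XDR out = new XDR();
    attr.serialize(out);
    WccAttr decoded = WccAttr.deserialize(new XDR(out.getBytes()));
    System.out.println("size = " + decoded.getSize());
  }
}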
| 1,943 | 25.630137 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/RMDIR3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* RMDIR3 Response
*/
public class RMDIR3Response extends NFS3Response {
private final WccData dirWcc;
public RMDIR3Response(int status) {
this(status, new WccData(null, null));
}
public RMDIR3Response(int status, WccData wccData) {
super(status);
this.dirWcc = wccData;
}
public WccData getDirWcc() {
return dirWcc;
}
public static RMDIR3Response deserialize(XDR xdr) {
int status = xdr.readInt();
WccData dirWcc = WccData.deserialize(xdr);
return new RMDIR3Response(status, dirWcc);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
dirWcc.serialize(out);
return out;
}
}
| 1,651 | 29.036364 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/PATHCONF3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* PATHCONF3 Response
*/
public class PATHCONF3Response extends NFS3Response {
private final Nfs3FileAttributes postOpAttr;
/* The maximum number of hard links to an object. */
private final int linkMax;
/* The maximum length of a component of a filename. */
private final int nameMax;
/*
* If TRUE, the server will reject any request that includes a name longer
* than name_max with the error, NFS3ERR_NAMETOOLONG. If FALSE, any length
* name over name_max bytes will be silently truncated to name_max bytes.
*/
private final boolean noTrunc;
/*
* If TRUE, the server will reject any request to change either the owner or
* the group associated with a file if the caller is not the privileged user.
* (Uid 0.)
*/
private final boolean chownRestricted;
/*
* If TRUE, the server file system does not distinguish case when interpreting
* filenames.
*/
private final boolean caseInsensitive;
/*
* If TRUE, the server file system will preserve the case of a name during a
* CREATE, MKDIR, MKNOD, SYMLINK, RENAME, or LINK operation.
*/
private final boolean casePreserving;
public PATHCONF3Response(int status) {
this(status, new Nfs3FileAttributes(), 0, 0, false, false, false, false);
}
public PATHCONF3Response(int status, Nfs3FileAttributes postOpAttr,
int linkMax, int nameMax, boolean noTrunc, boolean chownRestricted,
boolean caseInsensitive, boolean casePreserving) {
super(status);
this.postOpAttr = postOpAttr;
this.linkMax = linkMax;
this.nameMax = nameMax;
this.noTrunc = noTrunc;
this.chownRestricted = chownRestricted;
this.caseInsensitive = caseInsensitive;
this.casePreserving = casePreserving;
}
public static PATHCONF3Response deserialize(XDR xdr) {
int status = xdr.readInt();
xdr.readBoolean();
Nfs3FileAttributes objPostOpAttr = Nfs3FileAttributes.deserialize(xdr);
int linkMax = 0;
int nameMax = 0;
boolean noTrunc = false;
boolean chownRestricted = false;
boolean caseInsensitive = false;
boolean casePreserving = false;
if (status == Nfs3Status.NFS3_OK) {
linkMax = xdr.readInt();
nameMax = xdr.readInt();
noTrunc = xdr.readBoolean();
chownRestricted = xdr.readBoolean();
caseInsensitive = xdr.readBoolean();
casePreserving = xdr.readBoolean();
}
return new PATHCONF3Response(status, objPostOpAttr, linkMax, nameMax,
noTrunc, chownRestricted, caseInsensitive, casePreserving);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
out.writeBoolean(true);
postOpAttr.serialize(out);
if (getStatus() == Nfs3Status.NFS3_OK) {
out.writeInt(linkMax);
out.writeInt(nameMax);
out.writeBoolean(noTrunc);
out.writeBoolean(chownRestricted);
out.writeBoolean(caseInsensitive);
out.writeBoolean(casePreserving);
}
return out;
}
}
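// --- Illustrative sketch (not part of the original file): PATHCONF values a
// POSIX-like server might report; the limits below are made-up examples
// (a LINK_MAX-style hard-link limit and a 255-byte name limit).
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response;

public class Pathconf3Sketch {
  public static void main(String[] args) {
    PATHCONF3Response reply = new PATHCONF3Response(Nfs3Status.NFS3_OK,
        new Nfs3FileAttributes(),
        32000, // linkMax: maximum hard links per object
        255,   // nameMax: maximum filename component length
        true,  // noTrunc: reject names longer than nameMax
        true,  // chownRestricted: only uid 0 may change owner/group
        false, // caseInsensitive
        true); // casePreserving
    System.out.println("status = " + reply.getStatus());
  }
}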
| 4,037 | 32.65 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SETATTR3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* SETATTR3 Response
*/
public class SETATTR3Response extends NFS3Response {
private final WccData wccData;
public SETATTR3Response(int status) {
this(status, new WccData(null, null));
}
public SETATTR3Response(int status, WccData wccData) {
super(status);
this.wccData = wccData;
}
public WccData getWccData() {
return wccData;
}
public static SETATTR3Response deserialize(XDR xdr) {
int status = xdr.readInt();
WccData wccData = WccData.deserialize(xdr);
return new SETATTR3Response(status, wccData);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
wccData.serialize(out);
return out;
}
}
| 1,669 | 29.925926 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SYMLINK3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* SYMLINK3 Response
*/
public class SYMLINK3Response extends NFS3Response {
private final FileHandle objFileHandle;
private final Nfs3FileAttributes objPostOpAttr;
private final WccData dirWcc;
public SYMLINK3Response(int status) {
this(status, null, null, new WccData(null, null));
}
public SYMLINK3Response(int status, FileHandle handle,
Nfs3FileAttributes attrs, WccData dirWcc) {
super(status);
this.objFileHandle = handle;
this.objPostOpAttr = attrs;
this.dirWcc = dirWcc;
}
public FileHandle getObjFileHandle() {
return objFileHandle;
}
public Nfs3FileAttributes getObjPostOpAttr() {
return objPostOpAttr;
}
public WccData getDirWcc() {
return dirWcc;
}
public static SYMLINK3Response deserialize(XDR xdr) {
int status = xdr.readInt();
FileHandle objFileHandle = new FileHandle();
Nfs3FileAttributes objPostOpAttr = null;
WccData dirWcc;
if (status == Nfs3Status.NFS3_OK) {
xdr.readBoolean();
objFileHandle.deserialize(xdr);
xdr.readBoolean();
objPostOpAttr = Nfs3FileAttributes.deserialize(xdr);
}
dirWcc = WccData.deserialize(xdr);
return new SYMLINK3Response(status, objFileHandle, objPostOpAttr, dirWcc);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
if (this.getStatus() == Nfs3Status.NFS3_OK) {
out.writeBoolean(true);
objFileHandle.serialize(out);
out.writeBoolean(true);
objPostOpAttr.serialize(out);
}
dirWcc.serialize(out);
return out;
}
}
| 2,705 | 30.103448 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/NFS3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.oncrpc.RpcAcceptedReply;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* Base class for an NFSv3 response. This class and its subclasses contain
* the response from NFSv3 handlers.
*/
public class NFS3Response {
protected int status;
public NFS3Response(int status) {
this.status = status;
}
public int getStatus() {
return this.status;
}
public void setStatus(int status) {
this.status = status;
}
/**
* Write the response, along with the rpc header (including verifier), to the
* XDR.
*/
public XDR serialize(XDR out, int xid, Verifier verifier) {
RpcAcceptedReply reply = RpcAcceptedReply.getAcceptInstance(xid, verifier);
reply.write(out);
out.writeInt(this.getStatus());
return out;
}
}
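// --- Illustrative sketch (not part of the original file): serialize() first
// writes the RPC accepted-reply header (via RpcAcceptedReply) and then the
// NFS status word, so every subclass reply starts with the same preamble.
// The xid is made up; VerifierNone stands in for a real verifier.
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.nfs.nfs3.response.NFS3Response;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone;

public class Nfs3ResponseSketch {
  public static void main(String[] args) {
    NFS3Response reply = new NFS3Response(Nfs3Status.NFS3_OK);
    XDR out = reply.serialize(new XDR(), 42 /* xid */, new VerifierNone());
    System.out.println("header + status = " + out.getBytes().length + " bytes");
  }
}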
| 1,680 | 30.12963 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/LOOKUP3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import java.io.IOException;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* LOOKUP3 Response
*/
public class LOOKUP3Response extends NFS3Response {
private final FileHandle fileHandle;
private final Nfs3FileAttributes postOpObjAttr; // Can be null
private final Nfs3FileAttributes postOpDirAttr; // Can be null
public LOOKUP3Response(int status) {
this(status, null, new Nfs3FileAttributes(), new Nfs3FileAttributes());
}
public LOOKUP3Response(int status, FileHandle fileHandle,
Nfs3FileAttributes postOpObjAttr, Nfs3FileAttributes postOpDirAttributes) {
super(status);
this.fileHandle = fileHandle;
this.postOpObjAttr = postOpObjAttr;
this.postOpDirAttr = postOpDirAttributes;
}
public LOOKUP3Response(XDR xdr) throws IOException {
super(-1);
fileHandle = new FileHandle();
status = xdr.readInt();
Nfs3FileAttributes objAttr = null;
if (status == Nfs3Status.NFS3_OK) {
if (!fileHandle.deserialize(xdr)) {
throw new IOException("can't deserialize file handle");
}
objAttr = xdr.readBoolean() ? Nfs3FileAttributes.deserialize(xdr) : null;
}
postOpObjAttr = objAttr;
postOpDirAttr = xdr.readBoolean() ? Nfs3FileAttributes.deserialize(xdr)
: null;
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
if (this.status == Nfs3Status.NFS3_OK) {
fileHandle.serialize(out);
out.writeBoolean(true); // Attribute follows
postOpObjAttr.serialize(out);
}
out.writeBoolean(true); // Attribute follows
postOpDirAttr.serialize(out);
return out;
}
}
| 2,702 | 34.103896 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/MKNOD3Response.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.nfs.nfs3.response;
import org.apache.hadoop.nfs.nfs3.FileHandle;
import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
import org.apache.hadoop.nfs.nfs3.Nfs3Status;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* MKNOD3 Response
*/
public class MKNOD3Response extends NFS3Response {
private final FileHandle objFileHandle;
private final Nfs3FileAttributes objPostOpAttr;
private final WccData dirWcc;
public MKNOD3Response(int status) {
this(status, null, null, new WccData(null, null));
}
public MKNOD3Response(int status, FileHandle handle,
Nfs3FileAttributes attrs, WccData dirWcc) {
super(status);
this.objFileHandle = handle;
this.objPostOpAttr = attrs;
this.dirWcc = dirWcc;
}
public FileHandle getObjFileHandle() {
return objFileHandle;
}
public Nfs3FileAttributes getObjPostOpAttr() {
return objPostOpAttr;
}
public WccData getDirWcc() {
return dirWcc;
}
public static MKNOD3Response deserialize(XDR xdr) {
int status = xdr.readInt();
FileHandle objFileHandle = new FileHandle();
Nfs3FileAttributes objPostOpAttr = null;
WccData dirWcc;
if (status == Nfs3Status.NFS3_OK) {
xdr.readBoolean();
objFileHandle.deserialize(xdr);
xdr.readBoolean();
objPostOpAttr = Nfs3FileAttributes.deserialize(xdr);
}
dirWcc = WccData.deserialize(xdr);
return new MKNOD3Response(status, objFileHandle, objPostOpAttr, dirWcc);
}
@Override
public XDR serialize(XDR out, int xid, Verifier verifier) {
super.serialize(out, xid, verifier);
if (this.getStatus() == Nfs3Status.NFS3_OK) {
out.writeBoolean(true);
objFileHandle.serialize(out);
out.writeBoolean(true);
objPostOpAttr.serialize(out);
}
dirWcc.serialize(out);
return out;
}
}
| 2,667 | 30.388235 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mount;
import java.util.List;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.nfs.NfsExports;
import org.apache.hadoop.oncrpc.RpcAcceptedReply;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
/**
* Helper class for sending MountResponse
*/
public class MountResponse {
public static final int MNT_OK = 0;
/** Hidden constructor */
private MountResponse() {
}
/** Response for RPC call {@link MountInterface.MNTPROC#MNT} */
public static XDR writeMNTResponse(int status, XDR xdr, int xid,
byte[] handle) {
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
xdr.writeInt(status);
if (status == MNT_OK) {
xdr.writeVariableOpaque(handle);
// Only MountV3 returns a list of supported authFlavors
xdr.writeInt(1);
xdr.writeInt(AuthFlavor.AUTH_SYS.getValue());
}
return xdr;
}
/** Response for RPC call {@link MountInterface.MNTPROC#DUMP} */
public static XDR writeMountList(XDR xdr, int xid, List<MountEntry> mounts) {
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
for (MountEntry mountEntry : mounts) {
xdr.writeBoolean(true); // Value follows yes
xdr.writeString(mountEntry.getHost());
xdr.writeString(mountEntry.getPath());
}
xdr.writeBoolean(false); // Value follows no
return xdr;
}
/** Response for RPC call {@link MountInterface.MNTPROC#EXPORT} */
public static XDR writeExportList(XDR xdr, int xid, List<String> exports,
List<NfsExports> hostMatcher) {
assert (exports.size() == hostMatcher.size());
RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
for (int i = 0; i < exports.size(); i++) {
xdr.writeBoolean(true); // Value follows - yes
xdr.writeString(exports.get(i));
// List host groups
String[] hostGroups = hostMatcher.get(i).getHostGroupList();
if (hostGroups.length > 0) {
for (int j = 0; j < hostGroups.length; j++) {
xdr.writeBoolean(true); // Value follows - yes
xdr.writeVariableOpaque(hostGroups[j].getBytes(Charsets.UTF_8));
}
}
xdr.writeBoolean(false); // Value follows - no more group
}
xdr.writeBoolean(false); // Value follows - no
return xdr;
}
}
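// --- Illustrative sketch (not part of the original file): building a DUMP
// reply with writeMountList(). Each entry is preceded by a TRUE "value
// follows" boolean and the list is terminated by FALSE, as the method's
// comments describe. The host, path, and xid are made up.
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.mount.MountEntry;
import org.apache.hadoop.mount.MountResponse;
import org.apache.hadoop.oncrpc.XDR;

public class MountListSketch {
  public static void main(String[] args) {
    List<MountEntry> mounts = Arrays.asList(
        new MountEntry("client-1.example.com", "/export"));
    XDR reply = MountResponse.writeMountList(new XDR(), 7 /* xid */, mounts);
    System.out.println("reply bytes: " + reply.getBytes().length);
  }
}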
| 3,241 | 35.022222 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountInterface.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mount;
import java.net.InetAddress;
import org.apache.hadoop.oncrpc.XDR;
/**
* This interface should be implemented to handle mountd related
* requests. See RFC 1094 for more details.
*/
public interface MountInterface {
/** Mount procedures */
public static enum MNTPROC {
// the order of the values below is significant.
NULL,
MNT,
DUMP,
UMNT,
UMNTALL,
EXPORT,
EXPORTALL,
PATHCONF;
/** @return the int value representing the procedure. */
public int getValue() {
return ordinal();
}
/** @return the procedure corresponding to the value. */
public static MNTPROC fromValue(int value) {
if (value < 0 || value >= values().length) {
return null;
}
return values()[value];
}
}
/** MNTPROC_NULL - Do Nothing */
public XDR nullOp(XDR out, int xid, InetAddress client);
/** MNTPROC_MNT - Add mount entry */
public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client);
/** MNTPROC_DUMP - Return mount entries */
public XDR dump(XDR out, int xid, InetAddress client);
/** MNTPROC_UMNT - Remove mount entry */
public XDR umnt(XDR xdr, XDR out, int xid, InetAddress client);
/** MNTPROC_UMNTALL - Remove all mount entries */
public XDR umntall(XDR out, int xid, InetAddress client);
/** MNTPROC_EXPORT and MNTPROC_EXPORTALL - Return export list */
//public XDR exportall(XDR out, int xid, InetAddress client);
/** MNTPROC_PATHCONF - POSIX pathconf information */
//public XDR pathconf(XDR out, int xid, InetAddress client);
}
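// --- Illustrative sketch (not part of the original file): because the enum
// order mirrors the protocol procedure numbers, procedure 1 on the wire maps
// to MNT and out-of-range values map to null.
import org.apache.hadoop.mount.MountInterface.MNTPROC;

public class MntprocSketch {
  public static void main(String[] args) {
    System.out.println(MNTPROC.fromValue(1));   // MNT
    System.out.println(MNTPROC.MNT.getValue()); // 1
    System.out.println(MNTPROC.fromValue(99));  // null
  }
}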
| 2,410 | 30.723684 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountEntry.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mount;
/**
* Represents a mount entry.
*/
public class MountEntry {
/** Host corresponding to the mount entry */
private final String host;
/** Path corresponding to the mount entry */
private final String path;
public MountEntry(String host, String path) {
this.host = host;
this.path = path;
}
public String getHost() {
return this.host;
}
public String getPath() {
return this.path;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (!(o instanceof MountEntry)) {
return false;
}
MountEntry m = (MountEntry) o;
return getHost().equals(m.getHost()) && getPath().equals(m.getPath());
}
@Override
public int hashCode() {
return host.hashCode() * 31 + path.hashCode();
}
}
| 1,626 | 26.116667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mount;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcProgram;
import org.apache.hadoop.oncrpc.SimpleTcpServer;
import org.apache.hadoop.oncrpc.SimpleUdpServer;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.util.ShutdownHookManager;
import static org.apache.hadoop.util.ExitUtil.terminate;
/**
* Main class for starting the mountd daemon. This daemon implements the NFS
* mount protocol. When it receives a MOUNT request from an NFS client, it
* checks the request against the list of currently exported file systems. If
* the client is permitted to mount the file system, rpc.mountd obtains a file
* handle for the requested directory and returns it to the client.
*/
public abstract class MountdBase {
public static final Log LOG = LogFactory.getLog(MountdBase.class);
private final RpcProgram rpcProgram;
private int udpBoundPort; // Set after the server starts
private int tcpBoundPort; // Set after the server starts
public RpcProgram getRpcProgram() {
return rpcProgram;
}
/**
* Constructor
* @param program the RPC program to serve
* @throws IOException if initialization fails
*/
public MountdBase(RpcProgram program) throws IOException {
rpcProgram = program;
}
/* Start UDP server */
private void startUDPServer() {
SimpleUdpServer udpServer = new SimpleUdpServer(rpcProgram.getPort(),
rpcProgram, 1);
rpcProgram.startDaemons();
try {
udpServer.run();
} catch (Throwable e) {
LOG.fatal("Failed to start the UDP server.", e);
if (udpServer.getBoundPort() > 0) {
rpcProgram.unregister(PortmapMapping.TRANSPORT_UDP,
udpServer.getBoundPort());
}
udpServer.shutdown();
terminate(1, e);
}
udpBoundPort = udpServer.getBoundPort();
}
/* Start TCP server */
private void startTCPServer() {
SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
rpcProgram, 1);
rpcProgram.startDaemons();
try {
tcpServer.run();
} catch (Throwable e) {
LOG.fatal("Failed to start the TCP server.", e);
if (tcpServer.getBoundPort() > 0) {
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP,
tcpServer.getBoundPort());
}
tcpServer.shutdown();
terminate(1, e);
}
tcpBoundPort = tcpServer.getBoundPort();
}
public void start(boolean register) {
startUDPServer();
startTCPServer();
if (register) {
ShutdownHookManager.get().addShutdownHook(new Unregister(),
SHUTDOWN_HOOK_PRIORITY);
try {
rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
} catch (Throwable e) {
LOG.fatal("Failed to register the MOUNT service.", e);
terminate(1, e);
}
}
}
/**
* Priority of the mountd shutdown hook.
*/
public static final int SHUTDOWN_HOOK_PRIORITY = 10;
private class Unregister implements Runnable {
@Override
public synchronized void run() {
rpcProgram.unregister(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
}
}
}
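// --- Illustrative sketch (not part of the original file): MountdBase is
// abstract, so a concrete daemon supplies an RpcProgram and calls start().
// start(true) also registers the bound UDP/TCP ports with the portmapper and
// installs the Unregister shutdown hook. "program" is an assumed,
// already-constructed mount RpcProgram.
import java.io.IOException;
import org.apache.hadoop.mount.MountdBase;
import org.apache.hadoop.oncrpc.RpcProgram;

public class SketchMountd extends MountdBase {
  public SketchMountd(RpcProgram program) throws IOException {
    super(program);
  }
  // Usage, given a concrete RpcProgram named "program":
  //   new SketchMountd(program).start(true);
}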
| 4,117 | 31.68254 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.security.Credentials;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* Represents an RPC message of type RPC call as defined in RFC 1831
*/
public class RpcCall extends RpcMessage {
public static final int RPC_VERSION = 2;
private static final Log LOG = LogFactory.getLog(RpcCall.class);
public static RpcCall read(XDR xdr) {
return new RpcCall(xdr.readInt(), RpcMessage.Type.fromValue(xdr.readInt()),
xdr.readInt(), xdr.readInt(), xdr.readInt(), xdr.readInt(),
Credentials.readFlavorAndCredentials(xdr),
Verifier.readFlavorAndVerifier(xdr));
}
public static RpcCall getInstance(int xid, int program, int version,
int procedure, Credentials cred, Verifier verifier) {
return new RpcCall(xid, RpcMessage.Type.RPC_CALL, RPC_VERSION, program, version,
procedure, cred, verifier);
}
private final int rpcVersion;
private final int program;
private final int version;
private final int procedure;
private final Credentials credentials;
private final Verifier verifier;
protected RpcCall(int xid, RpcMessage.Type messageType, int rpcVersion,
int program, int version, int procedure, Credentials credential,
Verifier verifier) {
super(xid, messageType);
this.rpcVersion = rpcVersion;
this.program = program;
this.version = version;
this.procedure = procedure;
this.credentials = credential;
this.verifier = verifier;
if (LOG.isTraceEnabled()) {
LOG.trace(this);
}
validate();
}
private void validateRpcVersion() {
if (rpcVersion != RPC_VERSION) {
throw new IllegalArgumentException("RPC version is expected to be "
+ RPC_VERSION + " but got " + rpcVersion);
}
}
public void validate() {
validateMessageType(RpcMessage.Type.RPC_CALL);
validateRpcVersion();
// Validation of the remaining fields could be added here; like
// validateRpcVersion(), it should throw IllegalArgumentException on failure.
}
public int getRpcVersion() {
return rpcVersion;
}
public int getProgram() {
return program;
}
public int getVersion() {
return version;
}
public int getProcedure() {
return procedure;
}
public Credentials getCredential() {
return credentials;
}
public Verifier getVerifier() {
return verifier;
}
@Override
public XDR write(XDR xdr) {
xdr.writeInt(xid);
xdr.writeInt(RpcMessage.Type.RPC_CALL.getValue());
xdr.writeInt(RPC_VERSION);
xdr.writeInt(program);
xdr.writeInt(version);
xdr.writeInt(procedure);
Credentials.writeFlavorAndCredentials(credentials, xdr);
Verifier.writeFlavorAndVerifier(verifier, xdr);
return xdr;
}
@Override
public String toString() {
return String.format("Xid:%d, messageType:%s, rpcVersion:%d, program:%d,"
+ " version:%d, procedure:%d, credential:%s, verifier:%s", xid,
messageType, rpcVersion, program, version, procedure,
credentials.toString(), verifier.toString());
}
}
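// --- Illustrative sketch (not part of the original file): building the MNT
// call of the MOUNT v3 program (program number 100005, procedure 1).
// VerifierNone appears elsewhere in this dump; CredentialsNone is assumed to
// exist alongside it as the no-op AUTH_NONE credential. The xid is made up.
import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.CredentialsNone;
import org.apache.hadoop.oncrpc.security.VerifierNone;

public class RpcCallSketch {
  public static void main(String[] args) {
    RpcCall call = RpcCall.getInstance(7 /* xid */, 100005 /* MOUNT */,
        3 /* version */, 1 /* MNTPROC_MNT */,
        new CredentialsNone(), new VerifierNone());
    XDR out = call.write(new XDR());
    System.out.println("call bytes: " + out.getBytes().length);
  }
}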
| 3,902 | 29.492188 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.util.Arrays;
/**
* A simple UDP based RPC client which just sends one request to a server.
*/
public class SimpleUdpClient {
protected final String host;
protected final int port;
protected final XDR request;
protected final boolean oneShot;
protected final DatagramSocket clientSocket;
public SimpleUdpClient(String host, int port, XDR request,
DatagramSocket clientSocket) {
this(host, port, request, true, clientSocket);
}
public SimpleUdpClient(String host, int port, XDR request, boolean oneShot,
DatagramSocket clientSocket) {
this.host = host;
this.port = port;
this.request = request;
this.oneShot = oneShot;
this.clientSocket = clientSocket;
}
public void run() throws IOException {
InetAddress ipAddress = InetAddress.getByName(host);
byte[] sendData = request.getBytes();
byte[] receiveData = new byte[65535];
// Use the provided socket if there is one, else just make a new one.
DatagramSocket socket = this.clientSocket == null ?
new DatagramSocket() : this.clientSocket;
try {
DatagramPacket sendPacket = new DatagramPacket(sendData, sendData.length,
ipAddress, port);
socket.send(sendPacket);
socket.setSoTimeout(500);
DatagramPacket receivePacket = new DatagramPacket(receiveData,
receiveData.length);
socket.receive(receivePacket);
// Check reply status
XDR xdr = new XDR(Arrays.copyOfRange(receiveData, 0,
receivePacket.getLength()));
RpcReply reply = RpcReply.read(xdr);
if (reply.getState() != RpcReply.ReplyState.MSG_ACCEPTED) {
throw new IOException("Request failed: " + reply.getState());
}
} finally {
// If the client socket was passed in to this UDP client, it's on the
// caller of this UDP client to close that socket.
if (this.clientSocket == null) {
socket.close();
}
}
}
}
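// --- Illustrative sketch (not part of the original file): sending a single
// NULL call to a local portmapper (UDP port 111) with SimpleUdpClient.
// Passing null for the socket makes the client create and close its own.
// Program 100000, version 2, procedure 0 is the portmap NULL procedure;
// run() throws an IOException if no accepted reply arrives within the
// 500 ms timeout. CredentialsNone is assumed to exist in this package.
import java.io.IOException;
import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.oncrpc.SimpleUdpClient;
import org.apache.hadoop.oncrpc.XDR;
import org.apache.hadoop.oncrpc.security.CredentialsNone;
import org.apache.hadoop.oncrpc.security.VerifierNone;

public class UdpNullCallSketch {
  public static void main(String[] args) throws IOException {
    XDR request = RpcCall.getInstance(1 /* xid */, 100000, 2, 0,
        new CredentialsNone(), new VerifierNone()).write(new XDR());
    new SimpleUdpClient("localhost", 111, request, null).run();
    System.out.println("portmapper replied");
  }
}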
| 2,920 | 33.77381 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcMessage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
/**
* Represent an RPC message as defined in RFC 1831.
*/
public abstract class RpcMessage {
/** Message type */
public static enum Type {
// the order of the values below is significant.
RPC_CALL,
RPC_REPLY;
public int getValue() {
return ordinal();
}
public static Type fromValue(int value) {
if (value < 0 || value >= values().length) {
return null;
}
return values()[value];
}
}
protected final int xid;
protected final Type messageType;
RpcMessage(int xid, Type messageType) {
if (messageType != Type.RPC_CALL && messageType != Type.RPC_REPLY) {
throw new IllegalArgumentException("Invalid message type " + messageType);
}
this.xid = xid;
this.messageType = messageType;
}
public abstract XDR write(XDR xdr);
public int getXid() {
return xid;
}
public Type getMessageType() {
return messageType;
}
protected void validateMessageType(Type expected) {
if (expected != messageType) {
throw new IllegalArgumentException("Message type is expected to be "
+ expected + " but got " + messageType);
}
}
}
| 2,009 | 27.714286 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelHandler;
/**
 * A simple TCP based RPC client handler used by {@link SimpleTcpClient}.
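 *
 * <p>Subclasses can override {@code messageReceived} to inspect the reply
 * before closing the channel; a hypothetical sketch:
 * <pre>{@code
 * class LoggingClientHandler extends SimpleTcpClientHandler {
 *   LoggingClientHandler(XDR request) {
 *     super(request);
 *   }
 *   public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
 *     ChannelBuffer reply = (ChannelBuffer) e.getMessage();
 *     LOG.info("Received " + reply.readableBytes() + " reply bytes");
 *     e.getChannel().close();
 *   }
 * }
 * }</pre>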
*/
public class SimpleTcpClientHandler extends SimpleChannelHandler {
  public static final Log LOG = LogFactory.getLog(SimpleTcpClientHandler.class);
protected final XDR request;
public SimpleTcpClientHandler(XDR request) {
this.request = request;
}
@Override
public void channelConnected(ChannelHandlerContext ctx, ChannelStateEvent e) {
// Send the request
if (LOG.isDebugEnabled()) {
LOG.debug("sending PRC request");
}
ChannelBuffer outBuf = XDR.writeMessageTcp(request, true);
e.getChannel().write(outBuf);
}
/**
* Shutdown connection by default. Subclass can override this method to do
* more interaction with the server.
*/
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
e.getChannel().close();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
LOG.warn("Unexpected exception from downstream: ", e.getCause());
e.getChannel().close();
}
}
| 2,295 | 34.323077 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.net.InetSocketAddress;
import java.util.concurrent.Executors;
import org.jboss.netty.bootstrap.ClientBootstrap;
import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
/**
* A simple TCP based RPC client which just sends a request to a server.
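 *
 * <p>A minimal usage sketch; the host, port, and payload are hypothetical:
 * <pre>{@code
 * XDR request = new XDR();
 * request.writeInt(42); // some request payload
 * // Blocks until the server closes the connection (oneShot defaults to true).
 * new SimpleTcpClient("localhost", 111, request).run();
 * }</pre>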
*/
public class SimpleTcpClient {
protected final String host;
protected final int port;
protected final XDR request;
protected ChannelPipelineFactory pipelineFactory;
protected final boolean oneShot;
public SimpleTcpClient(String host, int port, XDR request) {
this(host,port, request, true);
}
  public SimpleTcpClient(String host, int port, XDR request, boolean oneShot) {
this.host = host;
this.port = port;
this.request = request;
this.oneShot = oneShot;
}
protected ChannelPipelineFactory setPipelineFactory() {
this.pipelineFactory = new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() {
return Channels.pipeline(
RpcUtil.constructRpcFrameDecoder(),
new SimpleTcpClientHandler(request));
}
};
return this.pipelineFactory;
}
public void run() {
// Configure the client.
ChannelFactory factory = new NioClientSocketChannelFactory(
Executors.newCachedThreadPool(), Executors.newCachedThreadPool(), 1, 1);
ClientBootstrap bootstrap = new ClientBootstrap(factory);
// Set up the pipeline factory.
bootstrap.setPipelineFactory(setPipelineFactory());
bootstrap.setOption("tcpNoDelay", true);
bootstrap.setOption("keepAlive", true);
// Start the connection attempt.
ChannelFuture future = bootstrap.connect(new InetSocketAddress(host, port));
if (oneShot) {
// Wait until the connection is closed or the connection attempt fails.
future.getChannel().getCloseFuture().awaitUninterruptibly();
// Shut down thread pools to exit.
bootstrap.releaseExternalResources();
}
}
}
| 3,048 | 33.647727 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAcceptedReply.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* Represents RPC message MSG_ACCEPTED reply body. See RFC 1831 for details.
 * This reply is sent when the server accepts an RPC call; the embedded
 * accept state indicates whether the call actually succeeded.
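 *
 * <p>A minimal sketch of building and serializing a successful reply; the
 * xid value is hypothetical:
 * <pre>{@code
 * RpcAcceptedReply reply =
 *     RpcAcceptedReply.getAcceptInstance(0x12345, new VerifierNone());
 * XDR out = reply.write(new XDR());
 * }</pre>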
*/
public class RpcAcceptedReply extends RpcReply {
public enum AcceptState {
    // The order of the values below is significant.
SUCCESS, /* RPC executed successfully */
PROG_UNAVAIL, /* remote hasn't exported program */
PROG_MISMATCH, /* remote can't support version # */
PROC_UNAVAIL, /* program can't support procedure */
GARBAGE_ARGS, /* procedure can't decode params */
SYSTEM_ERR; /* e.g. memory allocation failure */
public static AcceptState fromValue(int value) {
return values()[value];
}
public int getValue() {
return ordinal();
}
};
public static RpcAcceptedReply getAcceptInstance(int xid,
Verifier verifier) {
return getInstance(xid, AcceptState.SUCCESS, verifier);
}
public static RpcAcceptedReply getInstance(int xid, AcceptState state,
Verifier verifier) {
return new RpcAcceptedReply(xid, ReplyState.MSG_ACCEPTED, verifier,
state);
}
private final AcceptState acceptState;
RpcAcceptedReply(int xid, ReplyState state, Verifier verifier,
AcceptState acceptState) {
super(xid, state, verifier);
this.acceptState = acceptState;
}
public static RpcAcceptedReply read(int xid, ReplyState replyState, XDR xdr) {
Verifier verifier = Verifier.readFlavorAndVerifier(xdr);
AcceptState acceptState = AcceptState.fromValue(xdr.readInt());
return new RpcAcceptedReply(xid, replyState, verifier, acceptState);
}
public AcceptState getAcceptState() {
return acceptState;
}
@Override
public XDR write(XDR xdr) {
xdr.writeInt(xid);
xdr.writeInt(messageType.getValue());
xdr.writeInt(replyState.getValue());
Verifier.writeFlavorAndVerifier(verifier, xdr);
xdr.writeInt(acceptState.getValue());
return xdr;
}
}
| 2,850 | 32.940476 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.net.InetSocketAddress;
import java.util.concurrent.Executors;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
/**
 * Simple TCP server implemented using netty.
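 *
 * <p>A minimal usage sketch; the port and {@code program} are hypothetical:
 * <pre>{@code
 * SimpleTcpServer server = new SimpleTcpServer(2049, program, 0);
 * server.run();                       // bind and start listening
 * int actual = server.getBoundPort(); // the actually bound port
 * // ... serve requests ...
 * server.shutdown();
 * }</pre>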
*/
public class SimpleTcpServer {
public static final Log LOG = LogFactory.getLog(SimpleTcpServer.class);
protected final int port;
protected int boundPort = -1; // Will be set after server starts
protected final SimpleChannelUpstreamHandler rpcProgram;
private ServerBootstrap server;
private Channel ch;
/** The maximum number of I/O worker threads */
protected final int workerCount;
/**
   * @param port TCP port on which to start the server
* @param program RPC program corresponding to the server
* @param workercount Number of worker threads
*/
public SimpleTcpServer(int port, RpcProgram program, int workercount) {
this.port = port;
this.rpcProgram = program;
this.workerCount = workercount;
}
public void run() {
// Configure the Server.
ChannelFactory factory;
if (workerCount == 0) {
// Use default workers: 2 * the number of available processors
factory = new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(), Executors.newCachedThreadPool());
} else {
factory = new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(), Executors.newCachedThreadPool(),
workerCount);
}
server = new ServerBootstrap(factory);
server.setPipelineFactory(new ChannelPipelineFactory() {
@Override
public ChannelPipeline getPipeline() throws Exception {
return Channels.pipeline(RpcUtil.constructRpcFrameDecoder(),
RpcUtil.STAGE_RPC_MESSAGE_PARSER, rpcProgram,
RpcUtil.STAGE_RPC_TCP_RESPONSE);
}
});
server.setOption("child.tcpNoDelay", true);
server.setOption("child.keepAlive", true);
// Listen to TCP port
ch = server.bind(new InetSocketAddress(port));
InetSocketAddress socketAddr = (InetSocketAddress) ch.getLocalAddress();
boundPort = socketAddr.getPort();
LOG.info("Started listening to TCP requests at port " + boundPort + " for "
+ rpcProgram + " with workerCount " + workerCount);
}
// boundPort will be set only after server starts
public int getBoundPort() {
return this.boundPort;
}
public void shutdown() {
if (ch != null) {
ch.close().awaitUninterruptibly();
}
if (server != null) {
server.releaseExternalResources();
}
}
}
| 3,789 | 34.092593 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RegistrationClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.MessageEvent;
/**
* A simple client that registers an RPC program with portmap.
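 *
 * <p>A minimal registration sketch; the program number, version, and ports
 * are hypothetical, and {@code PortmapMapping.TRANSPORT_TCP} is assumed:
 * <pre>{@code
 * PortmapMapping mapping = new PortmapMapping(100003, 3,
 *     PortmapMapping.TRANSPORT_TCP, 2049);
 * XDR request = PortmapRequest.create(mapping, true); // true = register
 * new RegistrationClient("localhost", 111, request).run();
 * }</pre>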
*/
public class RegistrationClient extends SimpleTcpClient {
public static final Log LOG = LogFactory.getLog(RegistrationClient.class);
public RegistrationClient(String host, int port, XDR request) {
super(host, port, request);
}
/**
* Handler to handle response from the server.
*/
static class RegistrationClientHandler extends SimpleTcpClientHandler {
public RegistrationClientHandler(XDR request) {
super(request);
}
private boolean validMessageLength(int len) {
// 28 bytes is the minimal success response size (portmapV2)
if (len < 28) {
if (LOG.isDebugEnabled()) {
LOG.debug("Portmap mapping registration failed,"
+ " the response size is less than 28 bytes:" + len);
}
return false;
}
return true;
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
ChannelBuffer buf = (ChannelBuffer) e.getMessage(); // Read reply
if (!validMessageLength(buf.readableBytes())) {
e.getChannel().close();
return;
}
// handling fragment header for TCP, 4 bytes.
byte[] fragmentHeader = Arrays.copyOfRange(buf.array(), 0, 4);
int fragmentSize = XDR.fragmentSize(fragmentHeader);
boolean isLast = XDR.isLastFragment(fragmentHeader);
      assert (fragmentSize == 28 && isLast);
XDR xdr = new XDR();
xdr.writeFixedOpaque(Arrays.copyOfRange(buf.array(), 4,
buf.readableBytes()));
RpcReply reply = RpcReply.read(xdr);
if (reply.getState() == RpcReply.ReplyState.MSG_ACCEPTED) {
RpcAcceptedReply acceptedReply = (RpcAcceptedReply) reply;
handle(acceptedReply, xdr);
} else {
RpcDeniedReply deniedReply = (RpcDeniedReply) reply;
handle(deniedReply);
}
e.getChannel().close(); // shutdown now that request is complete
}
private void handle(RpcDeniedReply deniedReply) {
LOG.warn("Portmap mapping registration request was denied , " +
deniedReply);
}
private void handle(RpcAcceptedReply acceptedReply, XDR xdr) {
AcceptState acceptState = acceptedReply.getAcceptState();
assert (acceptState == AcceptState.SUCCESS);
      boolean answer = xdr.readBoolean();
      if (!answer) {
        LOG.warn("Portmap mapping registration failed, accept state:"
            + acceptState);
        return;
      }
      LOG.info("Portmap mapping registration succeeded");
}
}
}
| 3,731 | 34.542857 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.io.IOException;
import java.net.DatagramSocket;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
import org.apache.hadoop.oncrpc.security.Verifier;
import org.apache.hadoop.oncrpc.security.VerifierNone;
import org.apache.hadoop.portmap.PortmapMapping;
import org.apache.hadoop.portmap.PortmapRequest;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
/**
* Class for writing RPC server programs based on RFC 1050. Extend this class
* and implement {@link #handleInternal} to handle the requests received.
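 *
 * <p>A skeletal subclass sketch; the program name, numbers, and ports are
 * hypothetical:
 * <pre>{@code
 * class MyProgram extends RpcProgram {
 *   MyProgram() {
 *     super("myprog", "localhost", 0, 100099, 1, 1, null, true);
 *   }
 *   protected void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
 *     // Decode info.data(), run the procedure, send a reply via RpcUtil.
 *   }
 *   protected boolean isIdempotent(RpcCall call) {
 *     return true;
 *   }
 * }
 * }</pre>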
*/
public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
static final Log LOG = LogFactory.getLog(RpcProgram.class);
public static final int RPCB_PORT = 111;
private final String program;
private final String host;
private int port; // Ephemeral port is chosen later
private final int progNumber;
private final int lowProgVersion;
private final int highProgVersion;
protected final boolean allowInsecurePorts;
/**
   * If not null, this socket is used to connect to the system portmap
   * daemon when registering this RPC server program.
*/
private final DatagramSocket registrationSocket;
/**
* Constructor
*
* @param program program name
* @param host host where the Rpc server program is started
   * @param port port on which the Rpc server program listens
* @param progNumber program number as defined in RFC 1050
* @param lowProgVersion lowest version of the specification supported
* @param highProgVersion highest version of the specification supported
* @param registrationSocket if not null, use this socket to register
* with portmap daemon
* @param allowInsecurePorts true to allow client connections from
* unprivileged ports, false otherwise
*/
protected RpcProgram(String program, String host, int port, int progNumber,
int lowProgVersion, int highProgVersion,
DatagramSocket registrationSocket, boolean allowInsecurePorts) {
this.program = program;
this.host = host;
this.port = port;
this.progNumber = progNumber;
this.lowProgVersion = lowProgVersion;
this.highProgVersion = highProgVersion;
this.registrationSocket = registrationSocket;
this.allowInsecurePorts = allowInsecurePorts;
LOG.info("Will " + (allowInsecurePorts ? "" : "not ") + "accept client "
+ "connections from unprivileged ports");
}
/**
* Register this program with the local portmapper.
*/
public void register(int transport, int boundPort) {
if (boundPort != port) {
LOG.info("The bound port is " + boundPort
+ ", different with configured port " + port);
port = boundPort;
}
// Register all the program versions with portmapper for a given transport
for (int vers = lowProgVersion; vers <= highProgVersion; vers++) {
PortmapMapping mapEntry = new PortmapMapping(progNumber, vers, transport,
port);
register(mapEntry, true);
}
}
/**
* Unregister this program with the local portmapper.
*/
public void unregister(int transport, int boundPort) {
if (boundPort != port) {
LOG.info("The bound port is " + boundPort
+ ", different with configured port " + port);
port = boundPort;
}
// Unregister all the program versions with portmapper for a given transport
for (int vers = lowProgVersion; vers <= highProgVersion; vers++) {
PortmapMapping mapEntry = new PortmapMapping(progNumber, vers, transport,
port);
register(mapEntry, false);
}
}
/**
* Register the program with Portmap or Rpcbind
*/
protected void register(PortmapMapping mapEntry, boolean set) {
XDR mappingRequest = PortmapRequest.create(mapEntry, set);
SimpleUdpClient registrationClient = new SimpleUdpClient(host, RPCB_PORT,
mappingRequest, registrationSocket);
try {
registrationClient.run();
} catch (IOException e) {
String request = set ? "Registration" : "Unregistration";
      LOG.error(request + " failure with " + host + ":" + port
          + ", portmap entry: " + mapEntry, e);
throw new RuntimeException(request + " failure", e);
}
}
// Start extra daemons or services
public void startDaemons() {}
public void stopDaemons() {}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
throws Exception {
RpcInfo info = (RpcInfo) e.getMessage();
RpcCall call = (RpcCall) info.header();
SocketAddress remoteAddress = info.remoteAddress();
if (LOG.isTraceEnabled()) {
LOG.trace(program + " procedure #" + call.getProcedure());
}
if (this.progNumber != call.getProgram()) {
LOG.warn("Invalid RPC call program " + call.getProgram());
sendAcceptedReply(call, remoteAddress, AcceptState.PROG_UNAVAIL, ctx);
return;
}
int ver = call.getVersion();
if (ver < lowProgVersion || ver > highProgVersion) {
LOG.warn("Invalid RPC call version " + ver);
sendAcceptedReply(call, remoteAddress, AcceptState.PROG_MISMATCH, ctx);
return;
}
handleInternal(ctx, info);
}
public boolean doPortMonitoring(SocketAddress remoteAddress) {
if (!allowInsecurePorts) {
if (LOG.isTraceEnabled()) {
LOG.trace("Will not allow connections from unprivileged ports. "
+ "Checking for valid client port...");
}
if (remoteAddress instanceof InetSocketAddress) {
InetSocketAddress inetRemoteAddress = (InetSocketAddress) remoteAddress;
if (inetRemoteAddress.getPort() > 1023) {
LOG.warn("Connection attempted from '" + inetRemoteAddress + "' "
+ "which is an unprivileged port. Rejecting connection.");
return false;
}
} else {
LOG.warn("Could not determine remote port of socket address '"
+ remoteAddress + "'. Rejecting connection.");
return false;
}
}
return true;
}
private void sendAcceptedReply(RpcCall call, SocketAddress remoteAddress,
AcceptState acceptState, ChannelHandlerContext ctx) {
RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
acceptState, Verifier.VERIFIER_NONE);
XDR out = new XDR();
reply.write(out);
if (acceptState == AcceptState.PROG_MISMATCH) {
out.writeInt(lowProgVersion);
out.writeInt(highProgVersion);
}
ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
.buffer());
RpcResponse rsp = new RpcResponse(b, remoteAddress);
RpcUtil.sendRpcResponse(ctx, rsp);
}
protected static void sendRejectedReply(RpcCall call,
SocketAddress remoteAddress, ChannelHandlerContext ctx) {
XDR out = new XDR();
RpcDeniedReply reply = new RpcDeniedReply(call.getXid(),
RpcReply.ReplyState.MSG_DENIED,
RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
reply.write(out);
ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
.buffer());
RpcResponse rsp = new RpcResponse(buf, remoteAddress);
RpcUtil.sendRpcResponse(ctx, rsp);
}
protected abstract void handleInternal(ChannelHandlerContext ctx, RpcInfo info);
@Override
public String toString() {
return "Rpc program: " + program + " at " + host + ":" + port;
}
protected abstract boolean isIdempotent(RpcCall call);
public int getPort() {
return port;
}
}
| 8,698 | 35.860169 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.net.SocketAddress;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext;
/**
* RpcInfo records all contextual information of an RPC message. It contains
* the RPC header, the parameters, and the information of the remote peer.
*/
public final class RpcInfo {
private final RpcMessage header;
private final ChannelBuffer data;
private final Channel channel;
private final SocketAddress remoteAddress;
public RpcInfo(RpcMessage header, ChannelBuffer data,
ChannelHandlerContext channelContext, Channel channel,
SocketAddress remoteAddress) {
this.header = header;
this.data = data;
this.channel = channel;
this.remoteAddress = remoteAddress;
}
public RpcMessage header() {
return header;
}
public ChannelBuffer data() {
return data;
}
public Channel channel() {
return channel;
}
public SocketAddress remoteAddress() {
return remoteAddress;
}
}
| 1,864 | 29.57377 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCallCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.net.InetAddress;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import com.google.common.annotations.VisibleForTesting;
/**
 * This class is used for handling duplicate <em>non-idempotent</em> Rpc
 * calls. A non-idempotent request is processed as follows:
 * <ul>
 * <li>If the request is being processed for the first time, it is recorded
 * as in-progress in the cache.</li>
 * <li>If the request is retransmitted while still in-progress, it is ignored.
 * </li>
 * <li>If the request is retransmitted after completion, the previous response
 * from the cache is sent back to the client.</li>
* </ul>
* <br>
* A request is identified by the client ID (address of the client) and
* transaction ID (xid) from the Rpc call.
*
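 * <p>A minimal usage sketch; {@code clientAddr}, {@code xid}, and
 * {@code response} are hypothetical:
 * <pre>{@code
 * RpcCallCache cache = new RpcCallCache("NFS", 256);
 * CacheEntry entry = cache.checkOrAddToCache(clientAddr, xid);
 * if (entry == null) {
 *   // First time seen: process the call, then record the response.
 *   cache.callCompleted(clientAddr, xid, response);
 * } else if (entry.isCompleted()) {
 *   // Duplicate of a finished call: resend entry.getResponse().
 * }
 * // else: still in progress, drop the retransmission
 * }</pre>
 *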
*/
public class RpcCallCache {
public static class CacheEntry {
private RpcResponse response; // null if no response has been sent
public CacheEntry() {
response = null;
}
public boolean isInProgress() {
return response == null;
}
public boolean isCompleted() {
return response != null;
}
public RpcResponse getResponse() {
return response;
}
public void setResponse(RpcResponse response) {
this.response = response;
}
}
/**
   * Key that identifies a client request in the {@link RpcCallCache}
*/
public static class ClientRequest {
protected final InetAddress clientId;
protected final int xid;
public InetAddress getClientId() {
return clientId;
}
public ClientRequest(InetAddress clientId, int xid) {
this.clientId = clientId;
this.xid = xid;
}
@Override
public int hashCode() {
return xid + clientId.hashCode() * 31;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || !(obj instanceof ClientRequest)) {
return false;
}
ClientRequest other = (ClientRequest) obj;
return clientId.equals(other.clientId) && (xid == other.xid);
}
}
private final String program;
private final Map<ClientRequest, CacheEntry> map;
public RpcCallCache(final String program, final int maxEntries) {
if (maxEntries <= 0) {
throw new IllegalArgumentException("Cache size is " + maxEntries
+ ". Should be > 0");
}
this.program = program;
map = new LinkedHashMap<ClientRequest, CacheEntry>() {
private static final long serialVersionUID = 1L;
@Override
protected boolean removeEldestEntry(
java.util.Map.Entry<ClientRequest, CacheEntry> eldest) {
return RpcCallCache.this.size() > maxEntries;
}
};
}
/** Return the program name */
public String getProgram() {
return program;
}
  /** Mark a request as completed and add the corresponding response to the cache */
  public void callCompleted(InetAddress clientId, int xid, RpcResponse response) {
    ClientRequest req = new ClientRequest(clientId, xid);
    CacheEntry e;
    synchronized (map) {
      e = map.get(req);
    }
    if (e != null) { // The entry may have been evicted before completion
      e.setResponse(response);
    }
  }
  /**
   * Check the cache for an entry. If it does not exist, add the request
   * as in-progress and return null; otherwise return the existing entry.
   */
public CacheEntry checkOrAddToCache(InetAddress clientId, int xid) {
ClientRequest req = new ClientRequest(clientId, xid);
CacheEntry e;
synchronized(map) {
e = map.get(req);
if (e == null) {
// Add an inprogress cache entry
map.put(req, new CacheEntry());
}
}
return e;
}
/** Return number of cached entries */
public int size() {
return map.size();
}
/**
* Iterator to the cache entries
* @return iterator
*/
@VisibleForTesting
public Iterator<Entry<ClientRequest, CacheEntry>> iterator() {
return map.entrySet().iterator();
}
}
| 4,774 | 26.923977 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcDeniedReply.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import org.apache.hadoop.oncrpc.security.Verifier;
/**
* Represents RPC message MSG_DENIED reply body. See RFC 1831 for details.
 * This reply is sent when the server rejects an RPC call.
*/
public class RpcDeniedReply extends RpcReply {
public enum RejectState {
    // The order of the values below is significant.
RPC_MISMATCH,
AUTH_ERROR;
int getValue() {
return ordinal();
}
static RejectState fromValue(int value) {
return values()[value];
}
}
private final RejectState rejectState;
public RpcDeniedReply(int xid, ReplyState replyState,
RejectState rejectState, Verifier verifier) {
super(xid, replyState, verifier);
this.rejectState = rejectState;
}
public static RpcDeniedReply read(int xid, ReplyState replyState, XDR xdr) {
Verifier verifier = Verifier.readFlavorAndVerifier(xdr);
RejectState rejectState = RejectState.fromValue(xdr.readInt());
return new RpcDeniedReply(xid, replyState, rejectState, verifier);
}
public RejectState getRejectState() {
return rejectState;
}
@Override
public String toString() {
    return new StringBuilder().append("xid:").append(xid)
        .append(",messageType:").append(messageType)
        .append(",verifier_flavor:").append(verifier.getFlavor())
        .append(",rejectState:").append(rejectState).toString();
}
@Override
public XDR write(XDR xdr) {
xdr.writeInt(xid);
xdr.writeInt(messageType.getValue());
xdr.writeInt(replyState.getValue());
Verifier.writeFlavorAndVerifier(verifier, xdr);
xdr.writeInt(rejectState.getValue());
return xdr;
}
}
| 2,490 | 31.350649 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.nio.ByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.handler.codec.frame.FrameDecoder;
public final class RpcUtil {
/**
* The XID in RPC call. It is used for starting with new seed after each
* reboot.
*/
private static int xid = (int) (System.currentTimeMillis() / 1000) << 12;
public static int getNewXid(String caller) {
    xid = xid + 1 + caller.hashCode();
    return xid;
}
public static void sendRpcResponse(ChannelHandlerContext ctx,
RpcResponse response) {
Channels.fireMessageReceived(ctx, response);
}
public static FrameDecoder constructRpcFrameDecoder() {
return new RpcFrameDecoder();
}
public static final SimpleChannelUpstreamHandler STAGE_RPC_MESSAGE_PARSER = new RpcMessageParserStage();
public static final SimpleChannelUpstreamHandler STAGE_RPC_TCP_RESPONSE = new RpcTcpResponseStage();
public static final SimpleChannelUpstreamHandler STAGE_RPC_UDP_RESPONSE = new RpcUdpResponseStage();
/**
 * An RPC client can separate an RPC message into several frames (i.e.,
* fragments) when transferring it across the wire. RpcFrameDecoder
* reconstructs a full RPC message from these fragments.
*
* RpcFrameDecoder is a stateful pipeline stage. It has to be constructed for
* each RPC client.
*/
static class RpcFrameDecoder extends FrameDecoder {
public static final Log LOG = LogFactory.getLog(RpcFrameDecoder.class);
private ChannelBuffer currentFrame;
@Override
protected Object decode(ChannelHandlerContext ctx, Channel channel,
ChannelBuffer buf) {
      if (buf.readableBytes() < 4) {
        return null;
      }
buf.markReaderIndex();
byte[] fragmentHeader = new byte[4];
buf.readBytes(fragmentHeader);
int length = XDR.fragmentSize(fragmentHeader);
boolean isLast = XDR.isLastFragment(fragmentHeader);
if (buf.readableBytes() < length) {
buf.resetReaderIndex();
return null;
}
ChannelBuffer newFragment = buf.readSlice(length);
if (currentFrame == null) {
currentFrame = newFragment;
} else {
currentFrame = ChannelBuffers.wrappedBuffer(currentFrame, newFragment);
}
if (isLast) {
ChannelBuffer completeFrame = currentFrame;
currentFrame = null;
return completeFrame;
} else {
return null;
}
}
}
/**
* RpcMessageParserStage parses the network bytes and encapsulates the RPC
* request into a RpcInfo instance.
*/
static final class RpcMessageParserStage extends SimpleChannelUpstreamHandler {
private static final Log LOG = LogFactory
.getLog(RpcMessageParserStage.class);
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
throws Exception {
ChannelBuffer buf = (ChannelBuffer) e.getMessage();
ByteBuffer b = buf.toByteBuffer().asReadOnlyBuffer();
XDR in = new XDR(b, XDR.State.READING);
RpcInfo info = null;
try {
RpcCall callHeader = RpcCall.read(in);
ChannelBuffer dataBuffer = ChannelBuffers.wrappedBuffer(in.buffer()
.slice());
info = new RpcInfo(callHeader, dataBuffer, ctx, e.getChannel(),
e.getRemoteAddress());
} catch (Exception exc) {
LOG.info("Malformed RPC request from " + e.getRemoteAddress());
}
if (info != null) {
Channels.fireMessageReceived(ctx, info);
}
}
}
/**
* RpcTcpResponseStage sends an RpcResponse across the wire with the
* appropriate fragment header.
*/
private static class RpcTcpResponseStage extends SimpleChannelUpstreamHandler {
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
throws Exception {
RpcResponse r = (RpcResponse) e.getMessage();
byte[] fragmentHeader = XDR.recordMark(r.data().readableBytes(), true);
ChannelBuffer header = ChannelBuffers.wrappedBuffer(fragmentHeader);
ChannelBuffer d = ChannelBuffers.wrappedBuffer(header, r.data());
e.getChannel().write(d);
}
}
/**
* RpcUdpResponseStage sends an RpcResponse as a UDP packet, which does not
* require a fragment header.
*/
private static final class RpcUdpResponseStage extends
SimpleChannelUpstreamHandler {
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)
throws Exception {
RpcResponse r = (RpcResponse) e.getMessage();
e.getChannel().write(r.data(), r.remoteAddress());
}
}
}
| 5,787 | 33.248521 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcReply.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import org.apache.hadoop.oncrpc.security.RpcAuthInfo;
import org.apache.hadoop.oncrpc.security.Verifier;
import com.google.common.base.Preconditions;
/**
* Represents an RPC message of type RPC reply as defined in RFC 1831
*/
public abstract class RpcReply extends RpcMessage {
/** RPC reply_stat as defined in RFC 1831 */
public enum ReplyState {
    // The order of the values below is significant.
MSG_ACCEPTED,
MSG_DENIED;
int getValue() {
return ordinal();
}
public static ReplyState fromValue(int value) {
return values()[value];
}
}
protected final ReplyState replyState;
protected final Verifier verifier;
RpcReply(int xid, ReplyState state, Verifier verifier) {
super(xid, RpcMessage.Type.RPC_REPLY);
this.replyState = state;
this.verifier = verifier;
}
public RpcAuthInfo getVerifier() {
return verifier;
}
public static RpcReply read(XDR xdr) {
int xid = xdr.readInt();
final Type messageType = Type.fromValue(xdr.readInt());
Preconditions.checkState(messageType == RpcMessage.Type.RPC_REPLY);
ReplyState stat = ReplyState.fromValue(xdr.readInt());
switch (stat) {
case MSG_ACCEPTED:
return RpcAcceptedReply.read(xid, stat, xdr);
case MSG_DENIED:
return RpcDeniedReply.read(xid, stat, xdr);
}
return null;
}
public ReplyState getState() {
return replyState;
}
}
| 2,276 | 28.960526 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.nio.ByteBuffer;
import org.apache.commons.io.Charsets;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* Utility class for building XDR messages based on RFC 4506.
*
* Key points of the format:
*
* <ul>
* <li>Primitives are stored in big-endian order (i.e., the default byte order
* of ByteBuffer).</li>
* <li>Booleans are stored as an integer.</li>
* <li>Each field in the message is always aligned by 4.</li>
* </ul>
*
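 * <p>A minimal write/read round trip; the values are arbitrary:
 * <pre>{@code
 * XDR w = new XDR();
 * w.writeInt(7);
 * w.writeString("hello");
 * XDR r = w.asReadOnlyWrap(); // read-only view positioned for reading
 * int i = r.readInt();        // 7
 * String s = r.readString();  // "hello"
 * }</pre>
 *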
*/
public final class XDR {
private static final int DEFAULT_INITIAL_CAPACITY = 256;
private static final int SIZEOF_INT = 4;
private static final int SIZEOF_LONG = 8;
private static final byte[] PADDING_BYTES = new byte[] { 0, 0, 0, 0 };
private ByteBuffer buf;
public enum State {
READING, WRITING,
}
private final State state;
/**
* Construct a new XDR message buffer.
*
* @param initialCapacity
* the initial capacity of the buffer.
*/
public XDR(int initialCapacity) {
this(ByteBuffer.allocate(initialCapacity), State.WRITING);
}
public XDR() {
this(DEFAULT_INITIAL_CAPACITY);
}
public XDR(ByteBuffer buf, State state) {
this.buf = buf;
this.state = state;
}
/**
* Wraps a byte array as a read-only XDR message. There's no copy involved,
* thus it is the client's responsibility to ensure that the byte array
* remains unmodified when using the XDR object.
*
* @param src
* the byte array to be wrapped.
*/
public XDR(byte[] src) {
this(ByteBuffer.wrap(src).asReadOnlyBuffer(), State.READING);
}
public XDR asReadOnlyWrap() {
ByteBuffer b = buf.asReadOnlyBuffer();
if (state == State.WRITING) {
b.flip();
}
XDR n = new XDR(b, State.READING);
return n;
}
public ByteBuffer buffer() {
return buf.duplicate();
}
public int size() {
// TODO: This overloading intends to be compatible with the semantics of
// the previous version of the class. This function should be separated into
// two with clear semantics.
return state == State.READING ? buf.limit() : buf.position();
}
public int readInt() {
Preconditions.checkState(state == State.READING);
return buf.getInt();
}
public void writeInt(int v) {
ensureFreeSpace(SIZEOF_INT);
buf.putInt(v);
}
public boolean readBoolean() {
Preconditions.checkState(state == State.READING);
return buf.getInt() != 0;
}
public void writeBoolean(boolean v) {
ensureFreeSpace(SIZEOF_INT);
buf.putInt(v ? 1 : 0);
}
public long readHyper() {
Preconditions.checkState(state == State.READING);
return buf.getLong();
}
public void writeLongAsHyper(long v) {
ensureFreeSpace(SIZEOF_LONG);
buf.putLong(v);
}
public byte[] readFixedOpaque(int size) {
Preconditions.checkState(state == State.READING);
byte[] r = new byte[size];
buf.get(r);
alignPosition();
return r;
}
public void writeFixedOpaque(byte[] src, int length) {
ensureFreeSpace(alignUp(length));
buf.put(src, 0, length);
writePadding();
}
public void writeFixedOpaque(byte[] src) {
writeFixedOpaque(src, src.length);
}
public byte[] readVariableOpaque() {
Preconditions.checkState(state == State.READING);
int size = readInt();
return readFixedOpaque(size);
}
public void writeVariableOpaque(byte[] src) {
ensureFreeSpace(SIZEOF_INT + alignUp(src.length));
buf.putInt(src.length);
writeFixedOpaque(src);
}
public String readString() {
return new String(readVariableOpaque(), Charsets.UTF_8);
}
public void writeString(String s) {
writeVariableOpaque(s.getBytes(Charsets.UTF_8));
}
private void writePadding() {
Preconditions.checkState(state == State.WRITING);
int p = pad(buf.position());
ensureFreeSpace(p);
buf.put(PADDING_BYTES, 0, p);
}
private int alignUp(int length) {
return length + pad(length);
}
private int pad(int length) {
switch (length % 4) {
case 1:
return 3;
case 2:
return 2;
case 3:
return 1;
default:
return 0;
}
}
private void alignPosition() {
buf.position(alignUp(buf.position()));
}
private void ensureFreeSpace(int size) {
Preconditions.checkState(state == State.WRITING);
if (buf.remaining() < size) {
int newCapacity = buf.capacity() * 2;
int newRemaining = buf.capacity() + buf.remaining();
while (newRemaining < size) {
newRemaining += newCapacity;
newCapacity *= 2;
}
ByteBuffer newbuf = ByteBuffer.allocate(newCapacity);
buf.flip();
newbuf.put(buf);
buf = newbuf;
}
}
  /** Check whether the remaining data has at least len bytes */
public static boolean verifyLength(XDR xdr, int len) {
return xdr.buf.remaining() >= len;
}
static byte[] recordMark(int size, boolean last) {
byte[] b = new byte[SIZEOF_INT];
ByteBuffer buf = ByteBuffer.wrap(b);
buf.putInt(!last ? size : size | 0x80000000);
return b;
}
/** Write an XDR message to a TCP ChannelBuffer */
public static ChannelBuffer writeMessageTcp(XDR request, boolean last) {
Preconditions.checkState(request.state == XDR.State.WRITING);
ByteBuffer b = request.buf.duplicate();
b.flip();
byte[] fragmentHeader = XDR.recordMark(b.limit(), last);
ByteBuffer headerBuf = ByteBuffer.wrap(fragmentHeader);
// TODO: Investigate whether making a copy of the buffer is necessary.
return ChannelBuffers.copiedBuffer(headerBuf, b);
}
/** Write an XDR message to a UDP ChannelBuffer */
public static ChannelBuffer writeMessageUdp(XDR response) {
Preconditions.checkState(response.state == XDR.State.READING);
// TODO: Investigate whether making a copy of the buffer is necessary.
return ChannelBuffers.copiedBuffer(response.buf);
}
public static int fragmentSize(byte[] mark) {
ByteBuffer b = ByteBuffer.wrap(mark);
int n = b.getInt();
return n & 0x7fffffff;
}
public static boolean isLastFragment(byte[] mark) {
ByteBuffer b = ByteBuffer.wrap(mark);
int n = b.getInt();
return (n & 0x80000000) != 0;
}
@VisibleForTesting
public byte[] getBytes() {
ByteBuffer d = asReadOnlyWrap().buffer();
byte[] b = new byte[d.remaining()];
d.get(b);
return b;
}
}
| 7,328 | 25.846154 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.net.SocketAddress;
import org.jboss.netty.buffer.ChannelBuffer;
/**
 * RpcResponse encapsulates a response to an RPC request. It contains the data
* that is going to cross the wire, as well as the information of the remote
* peer.
*/
public class RpcResponse {
private final ChannelBuffer data;
private final SocketAddress remoteAddress;
public RpcResponse(ChannelBuffer data, SocketAddress remoteAddress) {
this.data = data;
this.remoteAddress = remoteAddress;
}
public ChannelBuffer data() {
return data;
}
public SocketAddress remoteAddress() {
return remoteAddress;
}
}
| 1,467 | 30.913043 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.net.InetSocketAddress;
import java.util.concurrent.Executors;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jboss.netty.bootstrap.ConnectionlessBootstrap;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.channel.socket.DatagramChannelFactory;
import org.jboss.netty.channel.socket.nio.NioDatagramChannelFactory;
/**
 * Simple UDP server implemented using netty.
*/
public class SimpleUdpServer {
public static final Log LOG = LogFactory.getLog(SimpleUdpServer.class);
  private static final int SEND_BUFFER_SIZE = 65536;
  private static final int RECEIVE_BUFFER_SIZE = 65536;
protected final int port;
protected final SimpleChannelUpstreamHandler rpcProgram;
protected final int workerCount;
protected int boundPort = -1; // Will be set after server starts
private ConnectionlessBootstrap server;
private Channel ch;
public SimpleUdpServer(int port, SimpleChannelUpstreamHandler program,
int workerCount) {
this.port = port;
this.rpcProgram = program;
this.workerCount = workerCount;
}
public void run() {
// Configure the client.
DatagramChannelFactory f = new NioDatagramChannelFactory(
Executors.newCachedThreadPool(), workerCount);
server = new ConnectionlessBootstrap(f);
server.setPipeline(Channels.pipeline(RpcUtil.STAGE_RPC_MESSAGE_PARSER,
rpcProgram, RpcUtil.STAGE_RPC_UDP_RESPONSE));
server.setOption("broadcast", "false");
server.setOption("sendBufferSize", SEND_BUFFER_SIZE);
server.setOption("receiveBufferSize", RECEIVE_BUFFER_SIZE);
// Listen to the UDP port
ch = server.bind(new InetSocketAddress(port));
InetSocketAddress socketAddr = (InetSocketAddress) ch.getLocalAddress();
boundPort = socketAddr.getPort();
LOG.info("Started listening to UDP requests at port " + boundPort + " for "
+ rpcProgram + " with workerCount " + workerCount);
}
// boundPort will be set only after server starts
public int getBoundPort() {
return this.boundPort;
}
public void shutdown() {
if (ch != null) {
ch.close().awaitUninterruptibly();
}
if (server != null) {
server.releaseExternalResources();
}
}
}
| 3,180 | 34.344444 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierGSS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc.security;
import org.apache.hadoop.oncrpc.XDR;
/** Verifier mapped to RPCSEC_GSS. */
public class VerifierGSS extends Verifier {
public VerifierGSS() {
super(AuthFlavor.RPCSEC_GSS);
}
@Override
public void read(XDR xdr) {
// TODO Auto-generated method stub
}
@Override
public void write(XDR xdr) {
// TODO Auto-generated method stub
}
}
| 1,221 | 28.095238 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc.security;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.XDR;
/**
* Base class for all credentials. Currently we only support 3 different types
* of auth flavors: AUTH_NONE, AUTH_SYS, and RPCSEC_GSS.
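 *
 * <p>A minimal decode sketch for an incoming call; {@code xdr} is assumed to
 * be positioned at the credentials field:
 * <pre>{@code
 * Credentials cred = Credentials.readFlavorAndCredentials(xdr);
 * if (cred instanceof CredentialsSys) {
 *   int uid = ((CredentialsSys) cred).getUID();
 * }
 * }</pre>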
*/
public abstract class Credentials extends RpcAuthInfo {
public static final Log LOG = LogFactory.getLog(Credentials.class);
public static Credentials readFlavorAndCredentials(XDR xdr) {
AuthFlavor flavor = AuthFlavor.fromValue(xdr.readInt());
final Credentials credentials;
if(flavor == AuthFlavor.AUTH_NONE) {
credentials = new CredentialsNone();
} else if(flavor == AuthFlavor.AUTH_SYS) {
credentials = new CredentialsSys();
} else if(flavor == AuthFlavor.RPCSEC_GSS) {
credentials = new CredentialsGSS();
} else {
throw new UnsupportedOperationException("Unsupported Credentials Flavor "
+ flavor);
}
credentials.read(xdr);
return credentials;
}
/**
* Write AuthFlavor and the credentials to the XDR
*/
public static void writeFlavorAndCredentials(Credentials cred, XDR xdr) {
if (cred instanceof CredentialsNone) {
xdr.writeInt(AuthFlavor.AUTH_NONE.getValue());
} else if (cred instanceof CredentialsSys) {
xdr.writeInt(AuthFlavor.AUTH_SYS.getValue());
} else if (cred instanceof CredentialsGSS) {
xdr.writeInt(AuthFlavor.RPCSEC_GSS.getValue());
} else {
throw new UnsupportedOperationException("Cannot recognize the verifier");
}
cred.write(xdr);
}
protected int mCredentialsLength;
protected Credentials(AuthFlavor flavor) {
super(flavor);
}
}
| 2,524 | 35.071429 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc.security;
import java.net.InetAddress;
import java.net.UnknownHostException;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.oncrpc.XDR;
/** Credential used by AUTH_SYS */
public class CredentialsSys extends Credentials {
private static final String HOSTNAME;
static {
try {
String s = InetAddress.getLocalHost().getHostName();
HOSTNAME = s;
if(LOG.isDebugEnabled()) {
LOG.debug("HOSTNAME = " + HOSTNAME);
}
} catch (UnknownHostException e) {
LOG.error("Error setting HOSTNAME", e);
throw new RuntimeException(e);
}
}
protected int mUID, mGID;
protected int[] mAuxGIDs;
protected String mHostName;
protected int mStamp;
public CredentialsSys() {
super(AuthFlavor.AUTH_SYS);
this.mCredentialsLength = 0;
this.mHostName = HOSTNAME;
}
public int getGID() {
return mGID;
}
public int getUID() {
return mUID;
}
public int[] getAuxGIDs() {
return mAuxGIDs;
}
public void setGID(int gid) {
this.mGID = gid;
}
public void setUID(int uid) {
this.mUID = uid;
}
public void setStamp(int stamp) {
this.mStamp = stamp;
}
@Override
public void read(XDR xdr) {
mCredentialsLength = xdr.readInt();
mStamp = xdr.readInt();
mHostName = xdr.readString();
mUID = xdr.readInt();
mGID = xdr.readInt();
int length = xdr.readInt();
mAuxGIDs = new int[length];
for (int i = 0; i < length; i++) {
mAuxGIDs[i] = xdr.readInt();
}
}
@Override
public void write(XDR xdr) {
    // 20 fixed bytes: mStamp(4) + hostname-length field(4) + mUID(4)
    // + mGID(4) + mAuxGIDs-count field(4), plus the UTF-8 hostname bytes.
    mCredentialsLength = 20 + mHostName.getBytes(Charsets.UTF_8).length;
// mAuxGIDs
if (mAuxGIDs != null && mAuxGIDs.length > 0) {
mCredentialsLength += mAuxGIDs.length * 4;
}
xdr.writeInt(mCredentialsLength);
xdr.writeInt(mStamp);
xdr.writeString(mHostName);
xdr.writeInt(mUID);
xdr.writeInt(mGID);
if((mAuxGIDs == null) || (mAuxGIDs.length == 0)) {
xdr.writeInt(0);
} else {
xdr.writeInt(mAuxGIDs.length);
for (int i = 0; i < mAuxGIDs.length; i++) {
xdr.writeInt(mAuxGIDs[i]);
}
}
}
}
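/**
 * Illustrative sketch (not part of the original source): builds an AUTH_SYS
 * credential for a given uid/gid and serializes it together with its flavor
 * discriminator. The stamp is an arbitrary caller-chosen value; a
 * time-derived one is used here purely for illustration.
 */
class CredentialsSysSketch {
  static XDR serialize(int uid, int gid) {
    CredentialsSys creds = new CredentialsSys();
    creds.setUID(uid);
    creds.setGID(gid);
    creds.setStamp((int) (System.currentTimeMillis() / 1000));
    XDR xdr = new XDR();
    // Writes AUTH_SYS(1), then length, stamp, hostname, uid, gid, aux gids.
    Credentials.writeFlavorAndCredentials(creds, xdr);
    return xdr;
  }
}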
| 3,065 | 24.55 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SysSecurityHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc.security;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.security.IdMappingConstant;
import org.apache.hadoop.security.IdMappingServiceProvider;
public class SysSecurityHandler extends SecurityHandler {
private final IdMappingServiceProvider iug;
private final CredentialsSys mCredentialsSys;
public SysSecurityHandler(CredentialsSys credentialsSys,
IdMappingServiceProvider iug) {
this.mCredentialsSys = credentialsSys;
this.iug = iug;
}
@Override
public String getUser() {
return iug.getUserName(mCredentialsSys.getUID(),
IdMappingConstant.UNKNOWN_USER);
}
@Override
public boolean shouldSilentlyDrop(RpcCall request) {
return false;
}
@Override
public VerifierNone getVerifer(RpcCall request) {
return new VerifierNone();
}
@Override
public int getUid() {
return mCredentialsSys.getUID();
}
@Override
public int getGid() {
return mCredentialsSys.getGID();
}
@Override
public int[] getAuxGids() {
return mCredentialsSys.getAuxGIDs();
}
}
| 1,957 | 28.223881 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc.security;
import org.apache.hadoop.oncrpc.XDR;
import com.google.common.base.Preconditions;
/** Credential used by AUTH_NONE */
public class CredentialsNone extends Credentials {
public CredentialsNone() {
super(AuthFlavor.AUTH_NONE);
mCredentialsLength = 0;
}
@Override
public void read(XDR xdr) {
mCredentialsLength = xdr.readInt();
Preconditions.checkState(mCredentialsLength == 0);
}
@Override
public void write(XDR xdr) {
Preconditions.checkState(mCredentialsLength == 0);
xdr.writeInt(mCredentialsLength);
}
}
| 1,402 | 30.886364 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc.security;
import org.apache.hadoop.oncrpc.XDR;
import com.google.common.base.Preconditions;
/** Verifier used by AUTH_NONE. */
public class VerifierNone extends Verifier {
public VerifierNone() {
super(AuthFlavor.AUTH_NONE);
}
@Override
public void read(XDR xdr) {
int length = xdr.readInt();
Preconditions.checkState(length == 0);
}
@Override
public void write(XDR xdr) {
xdr.writeInt(0);
}
}
| 1,272 | 29.309524 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsGSS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc.security;
import org.apache.hadoop.oncrpc.XDR;
/** Credential used by RPCSEC_GSS */
public class CredentialsGSS extends Credentials {
public CredentialsGSS() {
super(AuthFlavor.RPCSEC_GSS);
}
@Override
public void read(XDR xdr) {
// TODO Auto-generated method stub
}
@Override
public void write(XDR xdr) {
// TODO Auto-generated method stub
}
}
| 1,229 | 28.285714 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/RpcAuthInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc.security;
import org.apache.hadoop.oncrpc.XDR;
/**
* Authentication Info. Base class of Verifier and Credential.
*/
public abstract class RpcAuthInfo {
/** Different types of authentication as defined in RFC 1831 */
public enum AuthFlavor {
AUTH_NONE(0),
AUTH_SYS(1),
AUTH_SHORT(2),
AUTH_DH(3),
RPCSEC_GSS(6);
private int value;
AuthFlavor(int value) {
this.value = value;
}
public int getValue() {
return value;
}
static AuthFlavor fromValue(int value) {
for (AuthFlavor v : values()) {
if (v.value == value) {
return v;
}
}
throw new IllegalArgumentException("Invalid AuthFlavor value " + value);
}
}
private final AuthFlavor flavor;
protected RpcAuthInfo(AuthFlavor flavor) {
this.flavor = flavor;
}
/** Load auth info */
public abstract void read(XDR xdr);
/** Write auth info */
public abstract void write(XDR xdr);
public AuthFlavor getFlavor() {
return flavor;
}
@Override
public String toString() {
return "(AuthFlavor:" + flavor + ")";
}
}
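/**
 * Illustrative sketch (not part of the original source): the numeric
 * discriminator read off the wire maps back to an AuthFlavor constant, and
 * unknown values raise IllegalArgumentException.
 */
class AuthFlavorSketch {
  static RpcAuthInfo.AuthFlavor demo() {
    RpcAuthInfo.AuthFlavor flavor = RpcAuthInfo.AuthFlavor.fromValue(1);
    // flavor == AUTH_SYS, and flavor.getValue() == 1 round-trips.
    return flavor;
  }
}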
| 1,978 | 25.386667 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc.security;
import org.apache.hadoop.oncrpc.XDR;
/**
 * Base class for verifiers. Currently our authentication only supports 3 types
 * of auth flavors: {@link RpcAuthInfo.AuthFlavor#AUTH_NONE},
 * {@link RpcAuthInfo.AuthFlavor#AUTH_SYS}, and
 * {@link RpcAuthInfo.AuthFlavor#RPCSEC_GSS}. Thus for verifiers we only need
 * to handle AUTH_NONE and RPCSEC_GSS.
*/
public abstract class Verifier extends RpcAuthInfo {
public static final Verifier VERIFIER_NONE = new VerifierNone();
protected Verifier(AuthFlavor flavor) {
super(flavor);
}
/** Read both AuthFlavor and the verifier from the XDR */
public static Verifier readFlavorAndVerifier(XDR xdr) {
AuthFlavor flavor = AuthFlavor.fromValue(xdr.readInt());
    final Verifier verifier;
    if (flavor == AuthFlavor.AUTH_NONE) {
      verifier = new VerifierNone();
    } else if (flavor == AuthFlavor.RPCSEC_GSS) {
      verifier = new VerifierGSS();
    } else {
      throw new UnsupportedOperationException("Unsupported verifier flavor "
          + flavor);
    }
    verifier.read(xdr);
    return verifier;
}
/**
* Write AuthFlavor and the verifier to the XDR
*/
public static void writeFlavorAndVerifier(Verifier verifier, XDR xdr) {
if (verifier instanceof VerifierNone) {
xdr.writeInt(AuthFlavor.AUTH_NONE.getValue());
} else if (verifier instanceof VerifierGSS) {
xdr.writeInt(AuthFlavor.RPCSEC_GSS.getValue());
} else {
throw new UnsupportedOperationException("Cannot recognize the verifier");
}
verifier.write(xdr);
}
}
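/**
 * Illustrative sketch (not part of the original source): the verifier
 * counterpart of the credentials round trip. Assumes XDR() and
 * asReadOnlyWrap() behave as used elsewhere in this module.
 */
class VerifierRoundTripSketch {
  static Verifier roundTrip(Verifier in) {
    XDR xdr = new XDR();
    Verifier.writeFlavorAndVerifier(in, xdr);
    return Verifier.readFlavorAndVerifier(xdr.asReadOnlyWrap());
  }
}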
| 2,389 | 35.212121 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc.security;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.oncrpc.RpcCall;
import org.apache.hadoop.oncrpc.XDR;
public abstract class SecurityHandler {
public static final Log LOG = LogFactory.getLog(SecurityHandler.class);
public abstract String getUser();
public abstract boolean shouldSilentlyDrop(RpcCall request);
public abstract Verifier getVerifer(RpcCall request) throws IOException;
public boolean isUnwrapRequired() {
return false;
}
public boolean isWrapRequired() {
return false;
}
/** Used by GSS */
public XDR unwrap(RpcCall request, byte[] data ) throws IOException {
throw new UnsupportedOperationException();
}
/** Used by GSS */
public byte[] wrap(RpcCall request, XDR response) throws IOException {
throw new UnsupportedOperationException();
}
/** Used by AUTH_SYS */
public int getUid() {
throw new UnsupportedOperationException();
}
/** Used by AUTH_SYS */
public int getGid() {
throw new UnsupportedOperationException();
}
/** Used by AUTH_SYS */
public int[] getAuxGids() {
throw new UnsupportedOperationException();
}
}
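/**
 * Illustrative sketch (not part of the original source): a trivial handler
 * that treats every call as an anonymous user. SysSecurityHandler is the
 * real AUTH_SYS implementation of this contract; the "nobody" principal
 * below is a hypothetical placeholder.
 */
class AnonymousSecurityHandlerSketch extends SecurityHandler {
  @Override
  public String getUser() {
    return "nobody";
  }
  @Override
  public boolean shouldSilentlyDrop(RpcCall request) {
    return false;
  }
  @Override
  public Verifier getVerifer(RpcCall request) {
    return new VerifierNone();
  }
}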
| 2,068 | 28.985507 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceAudience.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.classification;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
/**
* Annotation to inform users of a package, class or method's intended audience.
* Currently the audience can be {@link Public}, {@link LimitedPrivate} or
* {@link Private}. <br>
* All public classes must have InterfaceAudience annotation. <br>
* <ul>
* <li>Public classes that are not marked with this annotation must be
* considered by default as {@link Private}.</li>
*
* <li>External applications must only use classes that are marked
* {@link Public}. Avoid using non public classes as these classes
* could be removed or change in incompatible ways.</li>
*
* <li>Hadoop projects must only use classes that are marked
* {@link LimitedPrivate} or {@link Public}</li>
*
 * <li> Methods may have a different annotation that is more restrictive
 * than the audience classification of the class. Example: a class
 * might be {@link Public}, but a method may be {@link LimitedPrivate}.
 * </li></ul>
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class InterfaceAudience {
/**
* Intended for use by any project or application.
*/
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @interface Public {};
/**
* Intended only for the project(s) specified in the annotation.
* For example, "Common", "HDFS", "MapReduce", "ZooKeeper", "HBase".
*/
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @interface LimitedPrivate {
String[] value();
};
/**
* Intended for use only within Hadoop itself.
*/
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @interface Private {};
private InterfaceAudience() {} // Audience can't exist on its own
}
| 2,633 | 34.594595 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceStability.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.classification;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
/**
* Annotation to inform users of how much to rely on a particular package,
* class or method not changing over time. Currently the stability can be
* {@link Stable}, {@link Evolving} or {@link Unstable}. <br>
*
* <ul><li>All classes that are annotated with {@link Public} or
* {@link LimitedPrivate} must have InterfaceStability annotation. </li>
* <li>Classes that are {@link Private} are to be considered unstable unless
* a different InterfaceStability annotation states otherwise.</li>
* <li>Incompatible changes must not be made to classes marked as stable.</li>
* </ul>
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class InterfaceStability {
/**
   * Can evolve while retaining compatibility across minor release boundaries;
   * can break compatibility only at a major release (i.e. at m.0).
*/
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @interface Stable {};
/**
* Evolving, but can break compatibility at minor release (i.e. m.x)
*/
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @interface Evolving {};
/**
* No guarantee is provided as to reliability or stability across any
* level of release granularity.
*/
@Documented
@Retention(RetentionPolicy.RUNTIME)
public @interface Unstable {};
}
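/**
 * Illustrative sketch (not part of the original source): audience and
 * stability annotations are used together on API surface, and a member may
 * be more restrictive than its class.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
class AnnotationUsageSketch {
  @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
  @InterfaceStability.Unstable
  void internalHook() {
    // Visible to the named projects only; may change without notice.
  }
}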
| 2,483 | 36.636364 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.LimitedPrivate({"Common", "Avro", "Chukwa", "HBase", "HDFS",
"Hive", "MapReduce", "Pig", "ZooKeeper"})
package org.apache.hadoop.classification.tools;
import org.apache.hadoop.classification.InterfaceAudience;
| 1,038 | 44.173913 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.classification.tools;
import com.sun.javadoc.DocErrorReporter;
import com.sun.javadoc.LanguageVersion;
import com.sun.javadoc.RootDoc;
import com.sun.tools.doclets.standard.Standard;
/**
* A <a href="http://java.sun.com/javase/6/docs/jdk/api/javadoc/doclet/">Doclet</a>
* for excluding elements that are annotated with
* {@link org.apache.hadoop.classification.InterfaceAudience.Private} or
* {@link org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate}.
* It delegates to the Standard Doclet, and takes the same options.
*/
public class ExcludePrivateAnnotationsStandardDoclet {
public static LanguageVersion languageVersion() {
return LanguageVersion.JAVA_1_5;
}
public static boolean start(RootDoc root) {
System.out.println(
ExcludePrivateAnnotationsStandardDoclet.class.getSimpleName());
return Standard.start(RootDocProcessor.process(root));
}
public static int optionLength(String option) {
Integer length = StabilityOptions.optionLength(option);
if (length != null) {
return length;
}
return Standard.optionLength(option);
}
public static boolean validOptions(String[][] options,
DocErrorReporter reporter) {
StabilityOptions.validOptions(options, reporter);
String[][] filteredOptions = StabilityOptions.filterOptions(options);
return Standard.validOptions(filteredOptions, reporter);
}
}
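// Illustrative invocation (not part of the original source; the docletpath
// jar and the package name are placeholders):
//
//   javadoc -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsStandardDoclet \
//     -docletpath hadoop-annotations.jar \
//     -stable org.apache.hadoop.example
//
// The -stable/-evolving/-unstable flag is consumed by StabilityOptions;
// every other option is passed through to the Standard doclet.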
| 2,239 | 36.966102 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/IncludePublicAnnotationsStandardDoclet.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.classification.tools;
import com.sun.javadoc.DocErrorReporter;
import com.sun.javadoc.LanguageVersion;
import com.sun.javadoc.RootDoc;
import com.sun.tools.doclets.standard.Standard;
/**
* A <a href="http://java.sun.com/javase/6/docs/jdk/api/javadoc/doclet/">Doclet</a>
* that only includes class-level elements that are annotated with
* {@link org.apache.hadoop.classification.InterfaceAudience.Public}.
* Class-level elements with no annotation are excluded.
* In addition, all elements that are annotated with
* {@link org.apache.hadoop.classification.InterfaceAudience.Private} or
* {@link org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate}
* are also excluded.
* It delegates to the Standard Doclet, and takes the same options.
*/
public class IncludePublicAnnotationsStandardDoclet {
public static LanguageVersion languageVersion() {
return LanguageVersion.JAVA_1_5;
}
public static boolean start(RootDoc root) {
System.out.println(
IncludePublicAnnotationsStandardDoclet.class.getSimpleName());
RootDocProcessor.treatUnannotatedClassesAsPrivate = true;
return Standard.start(RootDocProcessor.process(root));
}
public static int optionLength(String option) {
Integer length = StabilityOptions.optionLength(option);
if (length != null) {
return length;
}
return Standard.optionLength(option);
}
public static boolean validOptions(String[][] options,
DocErrorReporter reporter) {
StabilityOptions.validOptions(options, reporter);
String[][] filteredOptions = StabilityOptions.filterOptions(options);
return Standard.validOptions(filteredOptions, reporter);
}
}
| 2,517 | 38.34375 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.classification.tools;
import com.sun.javadoc.AnnotationDesc;
import com.sun.javadoc.AnnotationTypeDoc;
import com.sun.javadoc.ClassDoc;
import com.sun.javadoc.ConstructorDoc;
import com.sun.javadoc.Doc;
import com.sun.javadoc.FieldDoc;
import com.sun.javadoc.MethodDoc;
import com.sun.javadoc.PackageDoc;
import com.sun.javadoc.ProgramElementDoc;
import com.sun.javadoc.RootDoc;
import java.lang.reflect.Array;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.WeakHashMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Process the {@link RootDoc} by substituting with (nested) proxy objects that
* exclude elements with Private or LimitedPrivate annotations.
* <p>
* Based on code from http://www.sixlegs.com/blog/java/exclude-javadoc-tag.html.
*/
class RootDocProcessor {
static String stability = StabilityOptions.UNSTABLE_OPTION;
static boolean treatUnannotatedClassesAsPrivate = false;
public static RootDoc process(RootDoc root) {
return (RootDoc) process(root, RootDoc.class);
}
private static Object process(Object obj, Class<?> type) {
if (obj == null) {
return null;
}
Class<?> cls = obj.getClass();
if (cls.getName().startsWith("com.sun.")) {
return getProxy(obj);
} else if (obj instanceof Object[]) {
Class<?> componentType = type.isArray() ? type.getComponentType()
: cls.getComponentType();
Object[] array = (Object[]) obj;
Object[] newArray = (Object[]) Array.newInstance(componentType,
array.length);
for (int i = 0; i < array.length; ++i) {
newArray[i] = process(array[i], componentType);
}
return newArray;
}
return obj;
}
private static Map<Object, Object> proxies =
new WeakHashMap<Object, Object>();
private static Object getProxy(Object obj) {
Object proxy = proxies.get(obj);
if (proxy == null) {
proxy = Proxy.newProxyInstance(obj.getClass().getClassLoader(),
obj.getClass().getInterfaces(), new ExcludeHandler(obj));
proxies.put(obj, proxy);
}
return proxy;
}
private static class ExcludeHandler implements InvocationHandler {
private Object target;
public ExcludeHandler(Object target) {
this.target = target;
}
@Override
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable {
String methodName = method.getName();
if (target instanceof Doc) {
if (methodName.equals("isIncluded")) {
Doc doc = (Doc) target;
return !exclude(doc) && doc.isIncluded();
}
if (target instanceof RootDoc) {
if (methodName.equals("classes")) {
return filter(((RootDoc) target).classes(), ClassDoc.class);
} else if (methodName.equals("specifiedClasses")) {
return filter(((RootDoc) target).specifiedClasses(), ClassDoc.class);
} else if (methodName.equals("specifiedPackages")) {
return filter(((RootDoc) target).specifiedPackages(), PackageDoc.class);
}
} else if (target instanceof ClassDoc) {
if (isFiltered(args)) {
if (methodName.equals("methods")) {
return filter(((ClassDoc) target).methods(true), MethodDoc.class);
} else if (methodName.equals("fields")) {
return filter(((ClassDoc) target).fields(true), FieldDoc.class);
} else if (methodName.equals("innerClasses")) {
return filter(((ClassDoc) target).innerClasses(true),
ClassDoc.class);
} else if (methodName.equals("constructors")) {
return filter(((ClassDoc) target).constructors(true),
ConstructorDoc.class);
}
}
} else if (target instanceof PackageDoc) {
if (methodName.equals("allClasses")) {
if (isFiltered(args)) {
return filter(((PackageDoc) target).allClasses(true),
ClassDoc.class);
} else {
return filter(((PackageDoc) target).allClasses(), ClassDoc.class);
}
} else if (methodName.equals("annotationTypes")) {
return filter(((PackageDoc) target).annotationTypes(),
AnnotationTypeDoc.class);
} else if (methodName.equals("enums")) {
return filter(((PackageDoc) target).enums(),
ClassDoc.class);
} else if (methodName.equals("errors")) {
return filter(((PackageDoc) target).errors(),
ClassDoc.class);
} else if (methodName.equals("exceptions")) {
return filter(((PackageDoc) target).exceptions(),
ClassDoc.class);
} else if (methodName.equals("interfaces")) {
return filter(((PackageDoc) target).interfaces(),
ClassDoc.class);
} else if (methodName.equals("ordinaryClasses")) {
return filter(((PackageDoc) target).ordinaryClasses(),
ClassDoc.class);
}
}
}
if (args != null) {
if (methodName.equals("compareTo") || methodName.equals("equals")
|| methodName.equals("overrides")
|| methodName.equals("subclassOf")) {
args[0] = unwrap(args[0]);
}
}
try {
return process(method.invoke(target, args), method.getReturnType());
} catch (InvocationTargetException e) {
throw e.getTargetException();
}
}
private static boolean exclude(Doc doc) {
AnnotationDesc[] annotations = null;
if (doc instanceof ProgramElementDoc) {
annotations = ((ProgramElementDoc) doc).annotations();
} else if (doc instanceof PackageDoc) {
annotations = ((PackageDoc) doc).annotations();
}
if (annotations != null) {
for (AnnotationDesc annotation : annotations) {
String qualifiedTypeName = annotation.annotationType().qualifiedTypeName();
if (qualifiedTypeName.equals(
InterfaceAudience.Private.class.getCanonicalName())
|| qualifiedTypeName.equals(
InterfaceAudience.LimitedPrivate.class.getCanonicalName())) {
return true;
}
if (stability.equals(StabilityOptions.EVOLVING_OPTION)) {
if (qualifiedTypeName.equals(
InterfaceStability.Unstable.class.getCanonicalName())) {
return true;
}
}
if (stability.equals(StabilityOptions.STABLE_OPTION)) {
if (qualifiedTypeName.equals(
InterfaceStability.Unstable.class.getCanonicalName())
|| qualifiedTypeName.equals(
InterfaceStability.Evolving.class.getCanonicalName())) {
return true;
}
}
}
for (AnnotationDesc annotation : annotations) {
String qualifiedTypeName =
annotation.annotationType().qualifiedTypeName();
if (qualifiedTypeName.equals(
InterfaceAudience.Public.class.getCanonicalName())) {
return false;
}
}
}
if (treatUnannotatedClassesAsPrivate) {
return doc.isClass() || doc.isInterface() || doc.isAnnotationType();
}
return false;
}
private static Object[] filter(Doc[] array, Class<?> componentType) {
if (array == null || array.length == 0) {
return array;
}
List<Object> list = new ArrayList<Object>(array.length);
for (Doc entry : array) {
if (!exclude(entry)) {
list.add(process(entry, componentType));
}
}
return list.toArray((Object[]) Array.newInstance(componentType, list
.size()));
}
private Object unwrap(Object proxy) {
if (proxy instanceof Proxy)
return ((ExcludeHandler) Proxy.getInvocationHandler(proxy)).target;
return proxy;
}
private boolean isFiltered(Object[] args) {
return args != null && Boolean.TRUE.equals(args[0]);
}
}
}
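/**
 * Illustrative sketch (not part of the original source): the same dynamic
 * proxy pattern in miniature -- wrap a target, intercept one method, and
 * delegate everything else, as ExcludeHandler does for the javadoc model.
 */
class ProxySketch {
  interface Greeter {
    String greet();
  }
  static Greeter filtered(final Greeter target) {
    return (Greeter) Proxy.newProxyInstance(
        Greeter.class.getClassLoader(), new Class<?>[] {Greeter.class},
        new InvocationHandler() {
          @Override
          public Object invoke(Object proxy, Method method, Object[] args)
              throws Throwable {
            // Intercept greet(); every other method goes to the target.
            return method.getName().equals("greet")
                ? "[filtered]" : method.invoke(target, args);
          }
        });
  }
}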
| 9,163 | 35.951613 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.classification.tools;
import com.sun.javadoc.DocErrorReporter;
import com.sun.javadoc.LanguageVersion;
import com.sun.javadoc.RootDoc;
import jdiff.JDiff;
/**
* A <a href="http://java.sun.com/javase/6/docs/jdk/api/javadoc/doclet/">Doclet</a>
* for excluding elements that are annotated with
* {@link org.apache.hadoop.classification.InterfaceAudience.Private} or
* {@link org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate}.
* It delegates to the JDiff Doclet, and takes the same options.
*/
public class ExcludePrivateAnnotationsJDiffDoclet {
public static LanguageVersion languageVersion() {
return LanguageVersion.JAVA_1_5;
}
public static boolean start(RootDoc root) {
System.out.println(
ExcludePrivateAnnotationsJDiffDoclet.class.getSimpleName());
return JDiff.start(RootDocProcessor.process(root));
}
public static int optionLength(String option) {
Integer length = StabilityOptions.optionLength(option);
if (length != null) {
return length;
}
return JDiff.optionLength(option);
}
public static boolean validOptions(String[][] options,
DocErrorReporter reporter) {
StabilityOptions.validOptions(options, reporter);
String[][] filteredOptions = StabilityOptions.filterOptions(options);
return JDiff.validOptions(filteredOptions, reporter);
}
}
| 2,194 | 35.583333 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/StabilityOptions.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.classification.tools;
import com.sun.javadoc.DocErrorReporter;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
class StabilityOptions {
public static final String STABLE_OPTION = "-stable";
public static final String EVOLVING_OPTION = "-evolving";
public static final String UNSTABLE_OPTION = "-unstable";
public static Integer optionLength(String option) {
String opt = option.toLowerCase(Locale.ENGLISH);
if (opt.equals(UNSTABLE_OPTION)) return 1;
if (opt.equals(EVOLVING_OPTION)) return 1;
if (opt.equals(STABLE_OPTION)) return 1;
return null;
}
public static void validOptions(String[][] options,
DocErrorReporter reporter) {
for (int i = 0; i < options.length; i++) {
String opt = options[i][0].toLowerCase(Locale.ENGLISH);
if (opt.equals(UNSTABLE_OPTION)) {
RootDocProcessor.stability = UNSTABLE_OPTION;
} else if (opt.equals(EVOLVING_OPTION)) {
RootDocProcessor.stability = EVOLVING_OPTION;
} else if (opt.equals(STABLE_OPTION)) {
RootDocProcessor.stability = STABLE_OPTION;
}
}
}
public static String[][] filterOptions(String[][] options) {
List<String[]> optionsList = new ArrayList<String[]>();
for (int i = 0; i < options.length; i++) {
if (!options[i][0].equalsIgnoreCase(UNSTABLE_OPTION)
&& !options[i][0].equalsIgnoreCase(EVOLVING_OPTION)
&& !options[i][0].equalsIgnoreCase(STABLE_OPTION)) {
optionsList.add(options[i]);
}
}
String[][] filteredOptions = new String[optionsList.size()][];
int i = 0;
for (String[] option : optionsList) {
filteredOptions[i++] = option;
}
return filteredOptions;
}
}
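/**
 * Illustrative sketch (not part of the original source): a doclet records
 * the requested stability level and strips the custom flag before
 * delegating. The null reporter is tolerable here only because
 * validOptions() above never consults it.
 */
class StabilityOptionsSketch {
  static String[][] demo() {
    String[][] options = {{"-stable"}, {"-d", "build/docs"}};
    StabilityOptions.validOptions(options, null); // sets RootDocProcessor.stability
    return StabilityOptions.filterOptions(options); // only {"-d", "build/docs"} remains
  }
}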
| 2,565 | 35.140845 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSWithZK.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import org.apache.hadoop.crypto.key.KeyProvider.Options;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.util.ZKSignerSecretProvider;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.LoginContext;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.net.URL;
import java.security.Principal;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
public class TestKMSWithZK {
protected Configuration createBaseKMSConf(File keyStoreDir) throws Exception {
Configuration conf = new Configuration(false);
conf.set(KMSConfiguration.KEY_PROVIDER_URI,
"jceks://file@" + new Path(keyStoreDir.getAbsolutePath(),
"kms.keystore").toUri());
conf.set("hadoop.kms.authentication.type", "simple");
conf.setBoolean(KMSConfiguration.KEY_AUTHORIZATION_ENABLE, false);
conf.set(KMSACLs.Type.GET_KEYS.getAclConfigKey(), "foo");
return conf;
}
@Test
public void testMultipleKMSInstancesWithZKSigner() throws Exception {
final File testDir = TestKMS.getTestDir();
Configuration conf = createBaseKMSConf(testDir);
TestingServer zkServer = new TestingServer();
zkServer.start();
MiniKMS kms1 = null;
MiniKMS kms2 = null;
conf.set(KMSAuthenticationFilter.CONFIG_PREFIX +
AuthenticationFilter.SIGNER_SECRET_PROVIDER, "zookeeper");
conf.set(KMSAuthenticationFilter.CONFIG_PREFIX +
ZKSignerSecretProvider.ZOOKEEPER_CONNECTION_STRING,
zkServer.getConnectString());
conf.set(KMSAuthenticationFilter.CONFIG_PREFIX +
ZKSignerSecretProvider.ZOOKEEPER_PATH, "/secret");
TestKMS.writeConf(testDir, conf);
try {
kms1 = new MiniKMS.Builder()
.setKmsConfDir(testDir).setLog4jConfFile("log4j.properties").build();
kms1.start();
kms2 = new MiniKMS.Builder()
.setKmsConfDir(testDir).setLog4jConfFile("log4j.properties").build();
kms2.start();
final URL url1 = new URL(kms1.getKMSUrl().toExternalForm() +
KMSRESTConstants.SERVICE_VERSION + "/" +
KMSRESTConstants.KEYS_NAMES_RESOURCE);
final URL url2 = new URL(kms2.getKMSUrl().toExternalForm() +
KMSRESTConstants.SERVICE_VERSION + "/" +
KMSRESTConstants.KEYS_NAMES_RESOURCE);
final DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
final DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
UserGroupInformation ugiFoo = UserGroupInformation.createUserForTesting(
"foo", new String[]{"gfoo"});
UserGroupInformation ugiBar = UserGroupInformation.createUserForTesting(
"bar", new String[]{"gBar"});
ugiFoo.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
HttpURLConnection conn = aUrl.openConnection(url1, token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
return null;
}
});
ugiBar.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
HttpURLConnection conn = aUrl.openConnection(url2, token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
return null;
}
});
ugiBar.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final DelegationTokenAuthenticatedURL.Token emptyToken =
new DelegationTokenAuthenticatedURL.Token();
HttpURLConnection conn = aUrl.openConnection(url2, emptyToken);
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
conn.getResponseCode());
return null;
}
});
} finally {
if (kms2 != null) {
kms2.stop();
}
if (kms1 != null) {
kms1.stop();
}
zkServer.stop();
}
}
}
| 6,696 | 36.205556 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import com.google.common.base.Preconditions;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.ssl.SslSocketConnectorSecure;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.security.SslSocketConnector;
import org.mortbay.jetty.webapp.WebAppContext;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Writer;
import java.net.InetAddress;
import java.net.MalformedURLException;
import java.net.ServerSocket;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.UUID;
public class MiniKMS {
private static Server createJettyServer(String keyStore, String password, int inPort) {
try {
boolean ssl = keyStore != null;
InetAddress localhost = InetAddress.getByName("localhost");
String host = "localhost";
ServerSocket ss = new ServerSocket((inPort < 0) ? 0 : inPort, 50, localhost);
int port = ss.getLocalPort();
ss.close();
Server server = new Server(0);
if (!ssl) {
server.getConnectors()[0].setHost(host);
server.getConnectors()[0].setPort(port);
} else {
SslSocketConnector c = new SslSocketConnectorSecure();
c.setHost(host);
c.setPort(port);
c.setNeedClientAuth(false);
c.setKeystore(keyStore);
c.setKeystoreType("jks");
c.setKeyPassword(password);
server.setConnectors(new Connector[]{c});
}
return server;
} catch (Exception ex) {
throw new RuntimeException("Could not start embedded servlet container, "
+ ex.getMessage(), ex);
}
}
private static URL getJettyURL(Server server) {
boolean ssl = server.getConnectors()[0].getClass()
== SslSocketConnectorSecure.class;
try {
String scheme = (ssl) ? "https" : "http";
return new URL(scheme + "://" +
server.getConnectors()[0].getHost() + ":" +
server.getConnectors()[0].getPort());
} catch (MalformedURLException ex) {
throw new RuntimeException("It should never happen, " + ex.getMessage(),
ex);
}
}
public static class Builder {
private File kmsConfDir;
private String log4jConfFile;
private File keyStoreFile;
private String keyStorePassword;
private int inPort = -1;
public Builder() {
kmsConfDir = new File("target/test-classes").getAbsoluteFile();
log4jConfFile = "kms-log4j.properties";
}
public Builder setKmsConfDir(File confDir) {
Preconditions.checkNotNull(confDir, "KMS conf dir is NULL");
Preconditions.checkArgument(confDir.exists(),
"KMS conf dir does not exist");
kmsConfDir = confDir;
return this;
}
public Builder setLog4jConfFile(String log4jConfFile) {
Preconditions.checkNotNull(log4jConfFile, "log4jconf file is NULL");
this.log4jConfFile = log4jConfFile;
return this;
}
public Builder setPort(int port) {
Preconditions.checkArgument(port > 0, "input port must be greater than 0");
this.inPort = port;
return this;
}
public Builder setSslConf(File keyStoreFile, String keyStorePassword) {
Preconditions.checkNotNull(keyStoreFile, "keystore file is NULL");
Preconditions.checkNotNull(keyStorePassword, "keystore password is NULL");
Preconditions.checkArgument(keyStoreFile.exists(),
"keystore file does not exist");
this.keyStoreFile = keyStoreFile;
this.keyStorePassword = keyStorePassword;
return this;
}
public MiniKMS build() {
Preconditions.checkArgument(kmsConfDir.exists(),
"KMS conf dir does not exist");
return new MiniKMS(kmsConfDir.getAbsolutePath(), log4jConfFile,
(keyStoreFile != null) ? keyStoreFile.getAbsolutePath() : null,
keyStorePassword, inPort);
}
}
private String kmsConfDir;
private String log4jConfFile;
private String keyStore;
private String keyStorePassword;
private Server jetty;
private int inPort;
private URL kmsURL;
public MiniKMS(String kmsConfDir, String log4ConfFile, String keyStore,
String password, int inPort) {
this.kmsConfDir = kmsConfDir;
this.log4jConfFile = log4ConfFile;
this.keyStore = keyStore;
this.keyStorePassword = password;
this.inPort = inPort;
}
public void start() throws Exception {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
System.setProperty(KMSConfiguration.KMS_CONFIG_DIR, kmsConfDir);
File aclsFile = new File(kmsConfDir, "kms-acls.xml");
if (!aclsFile.exists()) {
InputStream is = cl.getResourceAsStream("mini-kms-acls-default.xml");
OutputStream os = new FileOutputStream(aclsFile);
IOUtils.copy(is, os);
is.close();
os.close();
}
File coreFile = new File(kmsConfDir, "core-site.xml");
if (!coreFile.exists()) {
Configuration core = new Configuration();
Writer writer = new FileWriter(coreFile);
core.writeXml(writer);
writer.close();
}
File kmsFile = new File(kmsConfDir, "kms-site.xml");
if (!kmsFile.exists()) {
Configuration kms = new Configuration(false);
kms.set(KMSConfiguration.KEY_PROVIDER_URI,
"jceks://file@" + new Path(kmsConfDir, "kms.keystore").toUri());
kms.set("hadoop.kms.authentication.type", "simple");
Writer writer = new FileWriter(kmsFile);
kms.writeXml(writer);
writer.close();
}
System.setProperty("log4j.configuration", log4jConfFile);
jetty = createJettyServer(keyStore, keyStorePassword, inPort);
    // We need special handling for MiniKMS to work whether the webapp is in a
    // directory or in a JAR on the classpath, because of the way Jetty handles
    // webapps packaged as a DIR, WAR or JAR.
URL webXmlUrl = cl.getResource("kms-webapp/WEB-INF/web.xml");
if (webXmlUrl == null) {
throw new RuntimeException(
"Could not find kms-webapp/ dir in test classpath");
}
boolean webXmlInJar = webXmlUrl.getPath().contains(".jar!/");
String webappPath;
if (webXmlInJar) {
File webInf = new File("target/" + UUID.randomUUID().toString() +
"/kms-webapp/WEB-INF");
webInf.mkdirs();
new File(webInf, "web.xml").delete();
InputStream is = cl.getResourceAsStream("kms-webapp/WEB-INF/web.xml");
OutputStream os = new FileOutputStream(new File(webInf, "web.xml"));
IOUtils.copy(is, os);
is.close();
os.close();
webappPath = webInf.getParentFile().getAbsolutePath();
} else {
webappPath = cl.getResource("kms-webapp").getPath();
}
WebAppContext context = new WebAppContext(webappPath, "/kms");
if (webXmlInJar) {
context.setClassLoader(cl);
}
jetty.addHandler(context);
jetty.start();
kmsURL = new URL(getJettyURL(jetty), "kms");
}
public URL getKMSUrl() {
return kmsURL;
}
public void stop() {
if (jetty != null && jetty.isRunning()) {
try {
jetty.stop();
jetty = null;
} catch (Exception ex) {
throw new RuntimeException("Could not stop MiniKMS embedded Jetty, " +
ex.getMessage(), ex);
}
}
}
}
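/**
 * Illustrative sketch (not part of the original source): the lifecycle the
 * KMS tests follow. The conf dir must already exist; start() fills in
 * default kms-acls.xml, core-site.xml and kms-site.xml when absent.
 */
class MiniKMSUsageSketch {
  static URL startKms(File confDir) throws Exception {
    MiniKMS kms = new MiniKMS.Builder()
        .setKmsConfDir(confDir)
        .setLog4jConfFile("kms-log4j.properties")
        .build();
    kms.start();
    // Base URL is e.g. http://localhost:<port>/kms; callers must stop() later.
    return kms.getKMSUrl();
  }
}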
| 8,317 | 33.803347 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKeyAuthorizationKeyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.security.SecureRandom;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import org.apache.hadoop.crypto.key.KeyProvider.Options;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.UserProvider;
import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider.KeyACLs;
import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider.KeyOpType;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Assert;
import org.junit.Test;
public class TestKeyAuthorizationKeyProvider {
private static final String CIPHER = "AES";
@Test
public void testCreateKey() throws Exception {
final Configuration conf = new Configuration();
KeyProvider kp =
new UserProvider.Factory().createProvider(new URI("user:///"), conf);
KeyACLs mock = mock(KeyACLs.class);
when(mock.isACLPresent("foo", KeyOpType.MANAGEMENT)).thenReturn(true);
UserGroupInformation u1 = UserGroupInformation.createRemoteUser("u1");
when(mock.hasAccessToKey("foo", u1, KeyOpType.MANAGEMENT)).thenReturn(true);
final KeyProviderCryptoExtension kpExt =
new KeyAuthorizationKeyProvider(
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp),
mock);
u1.doAs(
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
kpExt.createKey("foo", SecureRandom.getSeed(16),
newOptions(conf));
} catch (IOException ioe) {
Assert.fail("User should be Authorized !!");
}
// "bar" key not configured
try {
kpExt.createKey("bar", SecureRandom.getSeed(16),
newOptions(conf));
Assert.fail("User should NOT be Authorized !!");
} catch (IOException ioe) {
// Ignore
}
return null;
}
}
);
// Unauthorized User
UserGroupInformation.createRemoteUser("badGuy").doAs(
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
kpExt.createKey("foo", SecureRandom.getSeed(16),
newOptions(conf));
Assert.fail("User should NOT be Authorized !!");
} catch (IOException ioe) {
// Ignore
}
return null;
}
}
);
}
@Test
public void testOpsWhenACLAttributeExists() throws Exception {
final Configuration conf = new Configuration();
KeyProvider kp =
new UserProvider.Factory().createProvider(new URI("user:///"), conf);
KeyACLs mock = mock(KeyACLs.class);
when(mock.isACLPresent("testKey", KeyOpType.MANAGEMENT)).thenReturn(true);
when(mock.isACLPresent("testKey", KeyOpType.GENERATE_EEK)).thenReturn(true);
when(mock.isACLPresent("testKey", KeyOpType.DECRYPT_EEK)).thenReturn(true);
when(mock.isACLPresent("testKey", KeyOpType.ALL)).thenReturn(true);
UserGroupInformation u1 = UserGroupInformation.createRemoteUser("u1");
UserGroupInformation u2 = UserGroupInformation.createRemoteUser("u2");
UserGroupInformation u3 = UserGroupInformation.createRemoteUser("u3");
UserGroupInformation sudo = UserGroupInformation.createRemoteUser("sudo");
when(mock.hasAccessToKey("testKey", u1, KeyOpType.MANAGEMENT)).thenReturn(true);
when(mock.hasAccessToKey("testKey", u2, KeyOpType.GENERATE_EEK)).thenReturn(true);
when(mock.hasAccessToKey("testKey", u3, KeyOpType.DECRYPT_EEK)).thenReturn(true);
when(mock.hasAccessToKey("testKey", sudo, KeyOpType.ALL)).thenReturn(true);
final KeyProviderCryptoExtension kpExt =
new KeyAuthorizationKeyProvider(
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp),
mock);
final KeyVersion barKv = u1.doAs(
new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
Options opt = newOptions(conf);
Map<String, String> m = new HashMap<String, String>();
m.put("key.acl.name", "testKey");
opt.setAttributes(m);
try {
KeyVersion kv =
kpExt.createKey("foo", SecureRandom.getSeed(16), opt);
kpExt.rollNewVersion(kv.getName());
kpExt.rollNewVersion(kv.getName(), SecureRandom.getSeed(16));
kpExt.deleteKey(kv.getName());
} catch (IOException ioe) {
Assert.fail("User should be Authorized !!");
}
KeyVersion retkv = null;
try {
retkv = kpExt.createKey("bar", SecureRandom.getSeed(16), opt);
kpExt.generateEncryptedKey(retkv.getName());
Assert.fail("User should NOT be Authorized to generate EEK !!");
          } catch (IOException ioe) {
            // Expected: u1 has no GENERATE_EEK access on this key
          }
Assert.assertNotNull(retkv);
return retkv;
}
}
);
final EncryptedKeyVersion barEKv =
u2.doAs(
new PrivilegedExceptionAction<EncryptedKeyVersion>() {
@Override
public EncryptedKeyVersion run() throws Exception {
try {
kpExt.deleteKey(barKv.getName());
Assert.fail("User should NOT be Authorized to "
+ "perform any other operation !!");
            } catch (IOException ioe) {
              // Expected: u2 only has GENERATE_EEK access
            }
return kpExt.generateEncryptedKey(barKv.getName());
}
});
u3.doAs(
new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
try {
kpExt.deleteKey(barKv.getName());
Assert.fail("User should NOT be Authorized to "
+ "perform any other operation !!");
          } catch (IOException ioe) {
            // Expected: u3 only has DECRYPT_EEK access
          }
return kpExt.decryptEncryptedKey(barEKv);
}
});
sudo.doAs(
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
Options opt = newOptions(conf);
Map<String, String> m = new HashMap<String, String>();
m.put("key.acl.name", "testKey");
opt.setAttributes(m);
try {
KeyVersion kv =
kpExt.createKey("foo", SecureRandom.getSeed(16), opt);
kpExt.rollNewVersion(kv.getName());
kpExt.rollNewVersion(kv.getName(), SecureRandom.getSeed(16));
EncryptedKeyVersion ekv = kpExt.generateEncryptedKey(kv.getName());
kpExt.decryptEncryptedKey(ekv);
kpExt.deleteKey(kv.getName());
} catch (IOException ioe) {
Assert.fail("User should be Allowed to do everything !!");
}
return null;
}
}
);
}
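  /** Build 128-bit AES key options shared by the tests. */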
private static KeyProvider.Options newOptions(Configuration conf) {
KeyProvider.Options options = new KeyProvider.Options(conf);
options.setCipher(CIPHER);
options.setBitLength(128);
return options;
}
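  /**
   * Decrypting an EEK whose encryption key name does not match its key
   * version must fail with an IllegalArgumentException.
   */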
@Test(expected = IllegalArgumentException.class)
public void testDecryptWithKeyVersionNameKeyMismatch() throws Exception {
final Configuration conf = new Configuration();
KeyProvider kp =
new UserProvider.Factory().createProvider(new URI("user:///"), conf);
KeyACLs mock = mock(KeyACLs.class);
when(mock.isACLPresent("testKey", KeyOpType.MANAGEMENT)).thenReturn(true);
when(mock.isACLPresent("testKey", KeyOpType.GENERATE_EEK)).thenReturn(true);
when(mock.isACLPresent("testKey", KeyOpType.DECRYPT_EEK)).thenReturn(true);
when(mock.isACLPresent("testKey", KeyOpType.ALL)).thenReturn(true);
UserGroupInformation u1 = UserGroupInformation.createRemoteUser("u1");
UserGroupInformation u2 = UserGroupInformation.createRemoteUser("u2");
UserGroupInformation u3 = UserGroupInformation.createRemoteUser("u3");
UserGroupInformation sudo = UserGroupInformation.createRemoteUser("sudo");
when(mock.hasAccessToKey("testKey", u1,
KeyOpType.MANAGEMENT)).thenReturn(true);
when(mock.hasAccessToKey("testKey", u2,
KeyOpType.GENERATE_EEK)).thenReturn(true);
when(mock.hasAccessToKey("testKey", u3,
KeyOpType.DECRYPT_EEK)).thenReturn(true);
when(mock.hasAccessToKey("testKey", sudo,
KeyOpType.ALL)).thenReturn(true);
final KeyProviderCryptoExtension kpExt =
new KeyAuthorizationKeyProvider(
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp),
mock);
sudo.doAs(
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
Options opt = newOptions(conf);
Map<String, String> m = new HashMap<String, String>();
m.put("key.acl.name", "testKey");
opt.setAttributes(m);
KeyVersion kv =
kpExt.createKey("foo", SecureRandom.getSeed(16), opt);
kpExt.rollNewVersion(kv.getName());
kpExt.rollNewVersion(kv.getName(), SecureRandom.getSeed(16));
EncryptedKeyVersion ekv = kpExt.generateEncryptedKey(kv.getName());
ekv = EncryptedKeyVersion.createForDecryption(
ekv.getEncryptionKeyName() + "x",
ekv.getEncryptionKeyVersionName(),
ekv.getEncryptedKeyIv(),
ekv.getEncryptedKeyVersion().getMaterial());
kpExt.decryptEncryptedKey(ekv);
return null;
}
}
);
}
}
| 11,051 | 39.632353 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import org.apache.hadoop.crypto.key.KeyProvider.Options;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.token.Token;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.LoginContext;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.net.URL;
import java.security.Principal;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
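/**
 * Functional tests for the KMS server: each test starts a MiniKMS (plus a
 * MiniKdc for the Kerberos cases) and exercises the client-side KMS
 * KeyProvider against it.
 */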
public class TestKMS {
@Before
public void cleanUp() {
// resetting kerberos security
Configuration conf = new Configuration();
UserGroupInformation.setConfiguration(conf);
}
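  /** Create a fresh, uniquely named test directory under target/. */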
public static File getTestDir() throws Exception {
File file = new File("dummy");
file = file.getAbsoluteFile();
file = file.getParentFile();
file = new File(file, "target");
file = new File(file, UUID.randomUUID().toString());
if (!file.mkdirs()) {
throw new RuntimeException("Could not create test directory: " + file);
}
return file;
}
public static abstract class KMSCallable<T> implements Callable<T> {
private URL kmsUrl;
protected URL getKMSUrl() {
return kmsUrl;
}
}
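  /** Create the client-side KeyProvider used to talk to the test KMS. */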
protected KeyProvider createProvider(URI uri, Configuration conf)
throws IOException {
return new LoadBalancingKMSClientProvider(
new KMSClientProvider[] { new KMSClientProvider(uri, conf) }, conf);
}
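  /** Run the callable against a MiniKMS started on an ephemeral port. */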
protected <T> T runServer(String keystore, String password, File confDir,
KMSCallable<T> callable) throws Exception {
return runServer(-1, keystore, password, confDir, callable);
}
protected <T> T runServer(int port, String keystore, String password, File confDir,
KMSCallable<T> callable) throws Exception {
MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder().setKmsConfDir(confDir)
.setLog4jConfFile("log4j.properties");
if (keystore != null) {
miniKMSBuilder.setSslConf(new File(keystore), password);
}
if (port > 0) {
miniKMSBuilder.setPort(port);
}
MiniKMS miniKMS = miniKMSBuilder.build();
miniKMS.start();
try {
System.out.println("Test KMS running at: " + miniKMS.getKMSUrl());
callable.kmsUrl = miniKMS.getKMSUrl();
return callable.call();
} finally {
miniKMS.stop();
}
}
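  /** Base KMS config: file-based JCEKS key store and simple authentication. */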
protected Configuration createBaseKMSConf(File keyStoreDir) throws Exception {
Configuration conf = new Configuration(false);
conf.set(KMSConfiguration.KEY_PROVIDER_URI,
"jceks://file@" + new Path(keyStoreDir.getAbsolutePath(), "kms.keystore").toUri());
conf.set("hadoop.kms.authentication.type", "simple");
return conf;
}
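  /** Write kms-site.xml, kms-acls.xml and an empty core-site.xml to confDir. */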
public static void writeConf(File confDir, Configuration conf)
throws Exception {
Writer writer = new FileWriter(new File(confDir,
KMSConfiguration.KMS_SITE_XML));
conf.writeXml(writer);
writer.close();
writer = new FileWriter(new File(confDir, KMSConfiguration.KMS_ACLS_XML));
conf.writeXml(writer);
writer.close();
//create empty core-site.xml
writer = new FileWriter(new File(confDir, "core-site.xml"));
new Configuration(false).writeXml(writer);
writer.close();
}
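  /** Turn the http(s) KMS URL into a kms:// provider URI. */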
public static URI createKMSUri(URL kmsUrl) throws Exception {
String str = kmsUrl.toString();
str = str.replaceFirst("://", "@");
return new URI("kms://" + str);
}
private static class KerberosConfiguration
extends javax.security.auth.login.Configuration {
private String principal;
private String keytab;
private boolean isInitiator;
private KerberosConfiguration(String principal, File keytab,
boolean client) {
this.principal = principal;
this.keytab = keytab.getAbsolutePath();
this.isInitiator = client;
}
public static javax.security.auth.login.Configuration createClientConfig(
String principal,
File keytab) {
return new KerberosConfiguration(principal, keytab, true);
}
private static String getKrb5LoginModuleName() {
return System.getProperty("java.vendor").contains("IBM")
? "com.ibm.security.auth.module.Krb5LoginModule"
: "com.sun.security.auth.module.Krb5LoginModule";
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
Map<String, String> options = new HashMap<String, String>();
options.put("keyTab", keytab);
options.put("principal", principal);
options.put("useKeyTab", "true");
options.put("storeKey", "true");
options.put("doNotPrompt", "true");
options.put("useTicketCache", "true");
options.put("renewTGT", "true");
options.put("refreshKrb5Config", "true");
options.put("isInitiator", Boolean.toString(isInitiator));
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
options.put("ticketCache", ticketCache);
}
options.put("debug", "true");
return new AppConfigurationEntry[]{
new AppConfigurationEntry(getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options)};
}
}
private static MiniKdc kdc;
private static File keytab;
@BeforeClass
public static void setUpMiniKdc() throws Exception {
File kdcDir = getTestDir();
Properties kdcConf = MiniKdc.createConf();
kdc = new MiniKdc(kdcConf, kdcDir);
kdc.start();
keytab = new File(kdcDir, "keytab");
List<String> principals = new ArrayList<String>();
principals.add("HTTP/localhost");
principals.add("client");
principals.add("hdfs");
principals.add("otheradmin");
principals.add("client/host");
principals.add("client1");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
principals.add(type.toString());
}
principals.add("CREATE_MATERIAL");
principals.add("ROLLOVER_MATERIAL");
kdc.createPrincipal(keytab,
principals.toArray(new String[principals.size()]));
}
@AfterClass
public static void tearDownMiniKdc() throws Exception {
if (kdc != null) {
kdc.stop();
}
}
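  /** Run the action as the given principal, logging in with the test keytab. */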
private <T> T doAs(String user, final PrivilegedExceptionAction<T> action)
throws Exception {
Set<Principal> principals = new HashSet<Principal>();
principals.add(new KerberosPrincipal(user));
//client login
Subject subject = new Subject(false, principals,
new HashSet<Object>(), new HashSet<Object>());
LoginContext loginContext = new LoginContext("", subject, null,
KerberosConfiguration.createClientConfig(user, keytab));
try {
loginContext.login();
subject = loginContext.getSubject();
UserGroupInformation ugi =
UserGroupInformation.getUGIFromSubject(subject);
return ugi.doAs(action);
} finally {
loginContext.logout();
}
}
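  /**
   * Start the KMS with the given SSL/Kerberos settings and verify basic key
   * listing, truststore reloader shutdown (SSL case) and delegation token
   * retrieval.
   */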
public void testStartStop(final boolean ssl, final boolean kerberos)
throws Exception {
Configuration conf = new Configuration();
if (kerberos) {
conf.set("hadoop.security.authentication", "kerberos");
}
UserGroupInformation.setConfiguration(conf);
File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
final String keystore;
final String password;
if (ssl) {
String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestKMS.class);
KeyStoreTestUtil.setupSSLConfig(testDir.getAbsolutePath(), sslConfDir,
conf, false);
keystore = testDir.getAbsolutePath() + "/serverKS.jks";
password = "serverP";
} else {
keystore = null;
password = null;
}
conf.set("hadoop.kms.authentication.token.validity", "1");
if (kerberos) {
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
}
writeConf(testDir, conf);
runServer(keystore, password, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
URL url = getKMSUrl();
Assert.assertEquals(keystore != null,
url.getProtocol().equals("https"));
final URI uri = createKMSUri(getKMSUrl());
if (ssl) {
KeyProvider testKp = createProvider(uri, conf);
ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
while (threadGroup.getParent() != null) {
threadGroup = threadGroup.getParent();
}
Thread[] threads = new Thread[threadGroup.activeCount()];
threadGroup.enumerate(threads);
Thread reloaderThread = null;
for (Thread thread : threads) {
if ((thread.getName() != null)
&& (thread.getName().contains("Truststore reloader thread"))) {
reloaderThread = thread;
}
}
Assert.assertTrue("Reloader is not alive", reloaderThread.isAlive());
testKp.close();
boolean reloaderStillAlive = true;
for (int i = 0; i < 10; i++) {
reloaderStillAlive = reloaderThread.isAlive();
if (!reloaderStillAlive) break;
Thread.sleep(1000);
}
Assert.assertFalse("Reloader is still alive", reloaderStillAlive);
}
if (kerberos) {
for (String user : new String[]{"client", "client/host"}) {
doAs(user, new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
final KeyProvider kp = createProvider(uri, conf);
// getKeys() empty
Assert.assertTrue(kp.getKeys().isEmpty());
Thread.sleep(4000);
Token<?>[] tokens =
((KeyProviderDelegationTokenExtension.DelegationTokenExtension)kp)
.addDelegationTokens("myuser", new Credentials());
Assert.assertEquals(1, tokens.length);
Assert.assertEquals("kms-dt", tokens[0].getKind().toString());
return null;
}
});
}
} else {
KeyProvider kp = createProvider(uri, conf);
// getKeys() empty
Assert.assertTrue(kp.getKeys().isEmpty());
Thread.sleep(4000);
Token<?>[] tokens =
((KeyProviderDelegationTokenExtension.DelegationTokenExtension)kp)
.addDelegationTokens("myuser", new Credentials());
Assert.assertEquals(1, tokens.length);
Assert.assertEquals("kms-dt", tokens[0].getKind().toString());
}
return null;
}
});
}
@Test
public void testStartStopHttpPseudo() throws Exception {
testStartStop(false, false);
}
@Test
public void testStartStopHttpsPseudo() throws Exception {
testStartStop(true, false);
}
@Test
public void testStartStopHttpKerberos() throws Exception {
testStartStop(false, true);
}
@Test
public void testStartStopHttpsKerberos() throws Exception {
testStartStop(true, true);
}
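  /**
   * Exercise the full KeyProvider API against the KMS: create, roll, read,
   * metadata, EEK generation/decryption, delete, delegation tokens and
   * rollover draining.
   */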
@Test
public void testKMSProvider() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
File confDir = getTestDir();
conf = createBaseKMSConf(confDir);
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.MANAGEMENT", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.READ", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k3.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k4.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k5.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k6.ALL", "*");
writeConf(confDir, conf);
runServer(null, null, confDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
Date started = new Date();
Configuration conf = new Configuration();
URI uri = createKMSUri(getKMSUrl());
KeyProvider kp = createProvider(uri, conf);
// getKeys() empty
Assert.assertTrue(kp.getKeys().isEmpty());
// getKeysMetadata() empty
Assert.assertEquals(0, kp.getKeysMetadata().length);
// createKey()
KeyProvider.Options options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("l1");
KeyProvider.KeyVersion kv0 = kp.createKey("k1", options);
Assert.assertNotNull(kv0);
Assert.assertNotNull(kv0.getVersionName());
Assert.assertNotNull(kv0.getMaterial());
// getKeyVersion()
KeyProvider.KeyVersion kv1 = kp.getKeyVersion(kv0.getVersionName());
Assert.assertEquals(kv0.getVersionName(), kv1.getVersionName());
Assert.assertNotNull(kv1.getMaterial());
// getCurrent()
KeyProvider.KeyVersion cv1 = kp.getCurrentKey("k1");
Assert.assertEquals(kv0.getVersionName(), cv1.getVersionName());
Assert.assertNotNull(cv1.getMaterial());
// getKeyMetadata() 1 version
KeyProvider.Metadata m1 = kp.getMetadata("k1");
Assert.assertEquals("AES/CTR/NoPadding", m1.getCipher());
Assert.assertEquals("AES", m1.getAlgorithm());
Assert.assertEquals(128, m1.getBitLength());
Assert.assertEquals(1, m1.getVersions());
Assert.assertNotNull(m1.getCreated());
Assert.assertTrue(started.before(m1.getCreated()));
// getKeyVersions() 1 version
List<KeyProvider.KeyVersion> lkv1 = kp.getKeyVersions("k1");
Assert.assertEquals(1, lkv1.size());
Assert.assertEquals(kv0.getVersionName(), lkv1.get(0).getVersionName());
Assert.assertNotNull(kv1.getMaterial());
// rollNewVersion()
KeyProvider.KeyVersion kv2 = kp.rollNewVersion("k1");
Assert.assertNotSame(kv0.getVersionName(), kv2.getVersionName());
Assert.assertNotNull(kv2.getMaterial());
// getKeyVersion()
kv2 = kp.getKeyVersion(kv2.getVersionName());
boolean eq = true;
for (int i = 0; i < kv1.getMaterial().length; i++) {
eq = eq && kv1.getMaterial()[i] == kv2.getMaterial()[i];
}
Assert.assertFalse(eq);
// getCurrent()
KeyProvider.KeyVersion cv2 = kp.getCurrentKey("k1");
Assert.assertEquals(kv2.getVersionName(), cv2.getVersionName());
Assert.assertNotNull(cv2.getMaterial());
eq = true;
for (int i = 0; i < kv1.getMaterial().length; i++) {
eq = eq && cv2.getMaterial()[i] == kv2.getMaterial()[i];
}
Assert.assertTrue(eq);
// getKeyVersions() 2 versions
List<KeyProvider.KeyVersion> lkv2 = kp.getKeyVersions("k1");
Assert.assertEquals(2, lkv2.size());
Assert.assertEquals(kv1.getVersionName(), lkv2.get(0).getVersionName());
Assert.assertNotNull(lkv2.get(0).getMaterial());
Assert.assertEquals(kv2.getVersionName(), lkv2.get(1).getVersionName());
Assert.assertNotNull(lkv2.get(1).getMaterial());
// getKeyMetadata() 2 version
KeyProvider.Metadata m2 = kp.getMetadata("k1");
Assert.assertEquals("AES/CTR/NoPadding", m2.getCipher());
Assert.assertEquals("AES", m2.getAlgorithm());
Assert.assertEquals(128, m2.getBitLength());
Assert.assertEquals(2, m2.getVersions());
Assert.assertNotNull(m2.getCreated());
Assert.assertTrue(started.before(m2.getCreated()));
// getKeys() 1 key
List<String> ks1 = kp.getKeys();
Assert.assertEquals(1, ks1.size());
Assert.assertEquals("k1", ks1.get(0));
// getKeysMetadata() 1 key 2 versions
KeyProvider.Metadata[] kms1 = kp.getKeysMetadata("k1");
Assert.assertEquals(1, kms1.length);
Assert.assertEquals("AES/CTR/NoPadding", kms1[0].getCipher());
Assert.assertEquals("AES", kms1[0].getAlgorithm());
Assert.assertEquals(128, kms1[0].getBitLength());
Assert.assertEquals(2, kms1[0].getVersions());
Assert.assertNotNull(kms1[0].getCreated());
Assert.assertTrue(started.before(kms1[0].getCreated()));
// test generate and decryption of EEK
KeyProvider.KeyVersion kv = kp.getCurrentKey("k1");
KeyProviderCryptoExtension kpExt =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
EncryptedKeyVersion ek1 = kpExt.generateEncryptedKey(kv.getName());
Assert.assertEquals(KeyProviderCryptoExtension.EEK,
ek1.getEncryptedKeyVersion().getVersionName());
Assert.assertNotNull(ek1.getEncryptedKeyVersion().getMaterial());
Assert.assertEquals(kv.getMaterial().length,
ek1.getEncryptedKeyVersion().getMaterial().length);
KeyProvider.KeyVersion k1 = kpExt.decryptEncryptedKey(ek1);
Assert.assertEquals(KeyProviderCryptoExtension.EK, k1.getVersionName());
KeyProvider.KeyVersion k1a = kpExt.decryptEncryptedKey(ek1);
Assert.assertArrayEquals(k1.getMaterial(), k1a.getMaterial());
Assert.assertEquals(kv.getMaterial().length, k1.getMaterial().length);
EncryptedKeyVersion ek2 = kpExt.generateEncryptedKey(kv.getName());
KeyProvider.KeyVersion k2 = kpExt.decryptEncryptedKey(ek2);
boolean isEq = true;
for (int i = 0; isEq && i < ek2.getEncryptedKeyVersion()
.getMaterial().length; i++) {
isEq = k2.getMaterial()[i] == k1.getMaterial()[i];
}
Assert.assertFalse(isEq);
// deleteKey()
kp.deleteKey("k1");
// Check decryption after Key deletion
try {
kpExt.decryptEncryptedKey(ek1);
Assert.fail("Should not be allowed !!");
} catch (Exception e) {
Assert.assertTrue(e.getMessage().contains("'k1@1' not found"));
}
// getKey()
Assert.assertNull(kp.getKeyVersion("k1"));
// getKeyVersions()
Assert.assertNull(kp.getKeyVersions("k1"));
// getMetadata()
Assert.assertNull(kp.getMetadata("k1"));
// getKeys() empty
Assert.assertTrue(kp.getKeys().isEmpty());
// getKeysMetadata() empty
Assert.assertEquals(0, kp.getKeysMetadata().length);
// createKey() no description, no tags
options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
KeyVersion kVer2 = kp.createKey("k2", options);
KeyProvider.Metadata meta = kp.getMetadata("k2");
Assert.assertNull(meta.getDescription());
Assert.assertEquals("k2", meta.getAttributes().get("key.acl.name"));
          // test key ACL: k2 is granted only MANAGEMENT Op access
try {
kpExt =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
kpExt.generateEncryptedKey(kVer2.getName());
Assert.fail("User should not be allowed to encrypt !!");
} catch (Exception ex) {
            // Expected: k2 is granted only MANAGEMENT access
}
// createKey() description, no tags
options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("d");
kp.createKey("k3", options);
meta = kp.getMetadata("k3");
Assert.assertEquals("d", meta.getDescription());
Assert.assertEquals("k3", meta.getAttributes().get("key.acl.name"));
Map<String, String> attributes = new HashMap<String, String>();
attributes.put("a", "A");
// createKey() no description, tags
options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
attributes.put("key.acl.name", "k4");
options.setAttributes(attributes);
kp.createKey("k4", options);
meta = kp.getMetadata("k4");
Assert.assertNull(meta.getDescription());
Assert.assertEquals(attributes, meta.getAttributes());
// createKey() description, tags
options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("d");
attributes.put("key.acl.name", "k5");
options.setAttributes(attributes);
kp.createKey("k5", options);
meta = kp.getMetadata("k5");
Assert.assertEquals("d", meta.getDescription());
Assert.assertEquals(attributes, meta.getAttributes());
// test delegation token retrieval
KeyProviderDelegationTokenExtension kpdte =
KeyProviderDelegationTokenExtension.
createKeyProviderDelegationTokenExtension(kp);
Credentials credentials = new Credentials();
kpdte.addDelegationTokens("foo", credentials);
Assert.assertEquals(1, credentials.getAllTokens().size());
InetSocketAddress kmsAddr = new InetSocketAddress(getKMSUrl().getHost(),
getKMSUrl().getPort());
Assert.assertEquals(new Text("kms-dt"), credentials.getToken(
SecurityUtil.buildTokenService(kmsAddr)).getKind());
// test rollover draining
KeyProviderCryptoExtension kpce = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp);
options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
kpce.createKey("k6", options);
EncryptedKeyVersion ekv1 = kpce.generateEncryptedKey("k6");
kpce.rollNewVersion("k6");
EncryptedKeyVersion ekv2 = kpce.generateEncryptedKey("k6");
Assert.assertNotEquals(ekv1.getEncryptionKeyVersionName(),
ekv2.getEncryptionKeyVersionName());
return null;
}
});
}
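  /**
   * Verify per-key ACLs, default key ACLs and whitelist key ACLs for the
   * CREATE, DECRYPT_EEK, ROLLOVER, GET and GENERATE_EEK users.
   */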
@Test
public void testKeyACLs() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), type.toString());
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),"CREATE,ROLLOVER,GET,SET_KEY_MATERIAL,GENERATE_EEK,DECRYPT_EEK");
conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),"CREATE,ROLLOVER,GET,SET_KEY_MATERIAL,GENERATE_EEK,DECRYPT_EEK");
conf.set(KMSACLs.Type.GENERATE_EEK.getAclConfigKey(),"CREATE,ROLLOVER,GET,SET_KEY_MATERIAL,GENERATE_EEK,DECRYPT_EEK");
conf.set(KMSACLs.Type.DECRYPT_EEK.getAclConfigKey(),"CREATE,ROLLOVER,GET,SET_KEY_MATERIAL,GENERATE_EEK");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "test_key.MANAGEMENT", "CREATE");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "some_key.MANAGEMENT", "ROLLOVER");
conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT", "DECRYPT_EEK");
conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "ALL", "DECRYPT_EEK");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "all_access.ALL", "GENERATE_EEK");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "all_access.DECRYPT_EEK", "ROLLOVER");
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT", "ROLLOVER");
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK", "SOMEBODY");
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "ALL", "ROLLOVER");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
doAs("CREATE", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "test_key");
options.setAttributes(newAttribs);
KeyProvider.KeyVersion kv = kp.createKey("k0", options);
Assert.assertNull(kv.getMaterial());
KeyVersion rollVersion = kp.rollNewVersion("k0");
Assert.assertNull(rollVersion.getMaterial());
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
try {
kpce.generateEncryptedKey("k0");
Assert.fail("User [CREATE] should not be allowed to generate_eek on k0");
} catch (Exception e) {
// Ignore
}
newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
try {
kp.createKey("kx", options);
Assert.fail("User [CREATE] should not be allowed to create kx");
} catch (Exception e) {
// Ignore
}
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
        // Test whitelist key access:
        // the DECRYPT_EEK user is whitelisted for MANAGEMENT (and ALL) operations
doAs("DECRYPT_EEK", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "some_key");
options.setAttributes(newAttribs);
KeyProvider.KeyVersion kv = kp.createKey("kk0", options);
Assert.assertNull(kv.getMaterial());
KeyVersion rollVersion = kp.rollNewVersion("kk0");
Assert.assertNull(rollVersion.getMaterial());
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
try {
kpce.generateEncryptedKey("kk0");
Assert.fail("User [DECRYPT_EEK] should not be allowed to generate_eek on kk0");
} catch (Exception e) {
// Ignore
}
newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
kp.createKey("kkx", options);
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("ROLLOVER", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "test_key2");
options.setAttributes(newAttribs);
KeyProvider.KeyVersion kv = kp.createKey("k1", options);
Assert.assertNull(kv.getMaterial());
KeyVersion rollVersion = kp.rollNewVersion("k1");
Assert.assertNull(rollVersion.getMaterial());
try {
kp.rollNewVersion("k0");
Assert.fail("User [ROLLOVER] should not be allowed to rollover k0");
} catch (Exception e) {
// Ignore
}
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
try {
kpce.generateEncryptedKey("k1");
Assert.fail("User [ROLLOVER] should not be allowed to generate_eek on k1");
} catch (Exception e) {
// Ignore
}
newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
try {
kp.createKey("kx", options);
Assert.fail("User [ROLLOVER] should not be allowed to create kx");
} catch (Exception e) {
// Ignore
}
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("GET", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "test_key");
options.setAttributes(newAttribs);
try {
kp.createKey("k2", options);
Assert.fail("User [GET] should not be allowed to create key..");
} catch (Exception e) {
// Ignore
}
newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
try {
kp.createKey("kx", options);
Assert.fail("User [GET] should not be allowed to create kx");
} catch (Exception e) {
// Ignore
}
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
final EncryptedKeyVersion ekv = doAs("GENERATE_EEK", new PrivilegedExceptionAction<EncryptedKeyVersion>() {
@Override
public EncryptedKeyVersion run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
kp.createKey("kx", options);
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
try {
return kpce.generateEncryptedKey("kx");
} catch (Exception e) {
Assert.fail("User [GENERATE_EEK] should be allowed to generate_eek on kx");
}
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("ROLLOVER", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
kpce.decryptEncryptedKey(ekv);
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
return null;
}
});
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT", "");
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK", "*");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
doAs("GENERATE_EEK", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
try {
kpce.generateEncryptedKey("k1");
} catch (Exception e) {
Assert.fail("User [GENERATE_EEK] should be allowed to generate_eek on k1");
}
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
return null;
}
});
}
@Test
public void testKMSRestartKerberosAuth() throws Exception {
doKMSRestart(true);
}
@Test
public void testKMSRestartSimpleAuth() throws Exception {
doKMSRestart(false);
}
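  /** Restart the KMS on the same port and verify the client keeps working. */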
public void doKMSRestart(boolean useKrb) throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
if (useKrb) {
conf.set("hadoop.kms.authentication.type", "kerberos");
}
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), type.toString());
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k0.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k3.ALL", "*");
writeConf(testDir, conf);
KMSCallable<KeyProvider> c =
new KMSCallable<KeyProvider>() {
@Override
public KeyProvider call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
final KeyProvider kp =
doAs("SET_KEY_MATERIAL",
new PrivilegedExceptionAction<KeyProvider>() {
@Override
public KeyProvider run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
kp.createKey("k1", new byte[16],
new KeyProvider.Options(conf));
return kp;
}
});
return kp;
}
};
final KeyProvider retKp =
runServer(null, null, testDir, c);
// Restart server (using the same port)
runServer(c.getKMSUrl().getPort(), null, null, testDir,
new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
doAs("SET_KEY_MATERIAL",
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
retKp.createKey("k2", new byte[16],
new KeyProvider.Options(conf));
retKp.createKey("k3", new byte[16],
new KeyProvider.Options(conf));
return null;
}
});
return null;
}
});
}
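  /**
   * Verify the client survives an authentication cookie expiring (it retries
   * by default) and fails with a 401 when retries are disabled.
   */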
@Test
public void testKMSAuthFailureRetry() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
conf.set("hadoop.kms.authentication.token.validity", "1");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), type.toString());
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k0.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k3.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k4.ALL", "*");
writeConf(testDir, conf);
runServer(null, null, testDir,
new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
doAs("SET_KEY_MATERIAL",
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
kp.createKey("k0", new byte[16],
new KeyProvider.Options(conf));
// This happens before rollover
kp.createKey("k1", new byte[16],
new KeyProvider.Options(conf));
                  // At least 2 secret rollovers, so this should induce a signer exception
Thread.sleep(3500);
kp.createKey("k2", new byte[16],
new KeyProvider.Options(conf));
return null;
}
});
return null;
}
});
// Test retry count
runServer(null, null, testDir,
new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
conf.setInt(KMSClientProvider.AUTH_RETRY, 0);
final URI uri = createKMSUri(getKMSUrl());
doAs("SET_KEY_MATERIAL",
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
kp.createKey("k3", new byte[16],
new KeyProvider.Options(conf));
                  // At least 2 secret rollovers, so this should induce a signer exception
Thread.sleep(3500);
try {
kp.createKey("k4", new byte[16],
new KeyProvider.Options(conf));
Assert.fail("This should not succeed..");
} catch (IOException e) {
Assert.assertTrue(
"HTTP exception must be a 401 : " + e.getMessage(), e
.getMessage().contains("401"));
}
return null;
}
});
return null;
}
});
}
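  /**
   * Verify the operation-level KMS ACLs: each named user may perform only
   * its own operation, and ACL changes are picked up when the file reloads.
   */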
@Test
public void testACLs() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), type.toString());
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k0.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
//nothing allowed
doAs("client", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
kp.createKey("k", new KeyProvider.Options(conf));
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.createKey("k", new byte[16], new KeyProvider.Options(conf));
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.rollNewVersion("k");
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.rollNewVersion("k", new byte[16]);
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getKeys();
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getKeysMetadata("k");
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
// we are using JavaKeyStoreProvider for testing, so we know how
// the keyversion is created.
kp.getKeyVersion("k@0");
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getCurrentKey("k");
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getMetadata("k");
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getKeyVersions("k");
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("CREATE", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProvider.KeyVersion kv = kp.createKey("k0",
new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("DELETE", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
kp.deleteKey("k0");
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("SET_KEY_MATERIAL", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProvider.KeyVersion kv = kp.createKey("k1", new byte[16],
new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("ROLLOVER", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProvider.KeyVersion kv = kp.rollNewVersion("k1");
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("SET_KEY_MATERIAL", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProvider.KeyVersion kv =
kp.rollNewVersion("k1", new byte[16]);
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
final KeyVersion currKv =
doAs("GET", new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
kp.getKeyVersion("k1@0");
KeyVersion kv = kp.getCurrentKey("k1");
return kv;
} catch (Exception ex) {
Assert.fail(ex.toString());
}
return null;
}
});
final EncryptedKeyVersion encKv =
doAs("GENERATE_EEK",
new PrivilegedExceptionAction<EncryptedKeyVersion>() {
@Override
public EncryptedKeyVersion run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp);
EncryptedKeyVersion ek1 =
kpCE.generateEncryptedKey(currKv.getName());
return ek1;
} catch (Exception ex) {
Assert.fail(ex.toString());
}
return null;
}
});
doAs("DECRYPT_EEK", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp);
kpCE.decryptEncryptedKey(encKv);
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("GET_KEYS", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
kp.getKeys();
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("GET_METADATA", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
kp.getMetadata("k1");
kp.getKeysMetadata("k1");
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
//stop the reloader, to avoid running while we are writing the new file
KMSWebApp.getACLs().stopReloader();
// test ACL reloading
Thread.sleep(10); // to ensure the ACLs file modifiedTime is newer
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "foo");
writeConf(testDir, conf);
Thread.sleep(1000);
KMSWebApp.getACLs().run(); // forcing a reload by hand.
// should not be able to create a key now
doAs("CREATE", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("k2",
new KeyProvider.Options(conf));
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
return null;
}
});
}
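  /**
   * Verify that blacklisted users are denied DECRYPT_EEK even though the
   * operation ACL would otherwise allow it.
   */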
@Test
public void testKMSBlackList() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), " ");
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "client,hdfs,otheradmin");
conf.set(KMSACLs.Type.GENERATE_EEK.getAclConfigKey(), "client,hdfs,otheradmin");
conf.set(KMSACLs.Type.DECRYPT_EEK.getAclConfigKey(), "client,hdfs,otheradmin");
conf.set(KMSACLs.Type.DECRYPT_EEK.getBlacklistConfigKey(), "hdfs,otheradmin");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "ck0.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "ck1.ALL", "*");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
doAs("client", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck0",
new KeyProvider.Options(conf));
EncryptedKeyVersion eek =
((CryptoExtension)kp).generateEncryptedKey("ck0");
((CryptoExtension)kp).decryptEncryptedKey(eek);
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("hdfs", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck1",
new KeyProvider.Options(conf));
EncryptedKeyVersion eek =
((CryptoExtension)kp).generateEncryptedKey("ck1");
((CryptoExtension)kp).decryptEncryptedKey(eek);
Assert.fail("admin user must not be allowed to decrypt !!");
            } catch (Exception ex) {
              // Expected: hdfs is blacklisted for DECRYPT_EEK
            }
return null;
}
});
doAs("otheradmin", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck2",
new KeyProvider.Options(conf));
EncryptedKeyVersion eek =
((CryptoExtension)kp).generateEncryptedKey("ck2");
((CryptoExtension)kp).decryptEncryptedKey(eek);
Assert.fail("admin user must not be allowed to decrypt !!");
            } catch (Exception ex) {
              // Expected: otheradmin is blacklisted for DECRYPT_EEK
            }
return null;
}
});
return null;
}
});
}
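  /** Verify key ACLs that list both a user and a service principal (client/host). */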
@Test
public void testServicePrincipalACLs() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), " ");
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "client");
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT", "client,client/host");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
        conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
final URI uri = createKMSUri(getKMSUrl());
doAs("client", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck0",
new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("client/host", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck1",
new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
return null;
}
});
}
/**
* Test the configurable timeout in the KMSClientProvider. Open up a
* socket, but don't accept connections for it. This leads to a timeout
* when the KMS client attempts to connect.
* @throws Exception
*/
@Test
public void testKMSTimeout() throws Exception {
File confDir = getTestDir();
Configuration conf = createBaseKMSConf(confDir);
conf.setInt(KMSClientProvider.TIMEOUT_ATTR, 1);
writeConf(confDir, conf);
ServerSocket sock;
int port;
try {
sock = new ServerSocket(0, 50, InetAddress.getByName("localhost"));
port = sock.getLocalPort();
} catch ( Exception e ) {
/* Problem creating socket? Just bail. */
return;
}
URL url = new URL("http://localhost:" + port + "/kms");
URI uri = createKMSUri(url);
boolean caughtTimeout = false;
try {
KeyProvider kp = createProvider(uri, conf);
kp.getKeys();
} catch (SocketTimeoutException e) {
caughtTimeout = true;
} catch (IOException e) {
Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
}
caughtTimeout = false;
try {
KeyProvider kp = createProvider(uri, conf);
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp)
.generateEncryptedKey("a");
} catch (SocketTimeoutException e) {
caughtTimeout = true;
} catch (IOException e) {
Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
}
caughtTimeout = false;
try {
KeyProvider kp = createProvider(uri, conf);
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp)
.decryptEncryptedKey(
new KMSClientProvider.KMSEncryptedKeyVersion("a",
"a", new byte[] {1, 2}, "EEK", new byte[] {1, 2}));
} catch (SocketTimeoutException e) {
caughtTimeout = true;
} catch (IOException e) {
Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
}
Assert.assertTrue(caughtTimeout);
sock.close();
}
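  /**
   * Verify that a non-Kerberos UGI can reach the KMS once it holds a
   * delegation token obtained by a Kerberos user.
   */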
@Test
public void testDelegationTokenAccess() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
final String keyA = "key_a";
final String keyD = "key_d";
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + keyA + ".ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + keyD + ".ALL", "*");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
final URI uri = createKMSUri(getKMSUrl());
final Credentials credentials = new Credentials();
final UserGroupInformation nonKerberosUgi =
UserGroupInformation.getCurrentUser();
try {
KeyProvider kp = createProvider(uri, conf);
kp.createKey(keyA, new KeyProvider.Options(conf));
} catch (IOException ex) {
System.out.println(ex.getMessage());
}
doAs("client", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
KeyProviderDelegationTokenExtension kpdte =
KeyProviderDelegationTokenExtension.
createKeyProviderDelegationTokenExtension(kp);
kpdte.addDelegationTokens("foo", credentials);
return null;
}
});
nonKerberosUgi.addCredentials(credentials);
try {
KeyProvider kp = createProvider(uri, conf);
kp.createKey(keyA, new KeyProvider.Options(conf));
} catch (IOException ex) {
System.out.println(ex.getMessage());
}
nonKerberosUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
kp.createKey(keyD, new KeyProvider.Options(conf));
return null;
}
});
return null;
}
});
}
@Test
public void testKMSWithZKSigner() throws Exception {
    doKMSWithZK(false, true); // zkDTSM=false, zkSigner=true
}
@Test
public void testKMSWithZKDTSM() throws Exception {
    doKMSWithZK(true, false); // zkDTSM=true, zkSigner=false
}
@Test
public void testKMSWithZKSignerAndDTSM() throws Exception {
doKMSWithZK(true, true);
}
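  /** Run the KMS with a ZooKeeper-backed DT secret manager and/or signer secret, per the flags. */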
public void doKMSWithZK(boolean zkDTSM, boolean zkSigner) throws Exception {
TestingServer zkServer = null;
try {
zkServer = new TestingServer();
zkServer.start();
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab", keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
if (zkSigner) {
conf.set("hadoop.kms.authentication.signer.secret.provider", "zookeeper");
conf.set("hadoop.kms.authentication.signer.secret.provider.zookeeper.path","/testKMSWithZKDTSM");
conf.set("hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string",zkServer.getConnectString());
}
if (zkDTSM) {
conf.set("hadoop.kms.authentication.zk-dt-secret-manager.enable", "true");
}
if (zkDTSM && !zkSigner) {
conf.set("hadoop.kms.authentication.zk-dt-secret-manager.zkConnectionString", zkServer.getConnectString());
conf.set("hadoop.kms.authentication.zk-dt-secret-manager.znodeWorkingPath", "testZKPath");
conf.set("hadoop.kms.authentication.zk-dt-secret-manager.zkAuthType", "none");
}
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), type.toString());
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k0.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k3.ALL", "*");
writeConf(testDir, conf);
KMSCallable<KeyProvider> c =
new KMSCallable<KeyProvider>() {
@Override
public KeyProvider call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
final KeyProvider kp =
doAs("SET_KEY_MATERIAL",
new PrivilegedExceptionAction<KeyProvider>() {
@Override
public KeyProvider run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
kp.createKey("k1", new byte[16],
new KeyProvider.Options(conf));
kp.createKey("k2", new byte[16],
new KeyProvider.Options(conf));
kp.createKey("k3", new byte[16],
new KeyProvider.Options(conf));
return kp;
}
});
return kp;
}
};
runServer(null, null, testDir, c);
} finally {
if (zkServer != null) {
zkServer.stop();
zkServer.close();
}
}
}
@Test
public void testProxyUserKerb() throws Exception {
doProxyUserTest(true);
}
@Test
public void testProxyUserSimple() throws Exception {
doProxyUserTest(false);
}
public void doProxyUserTest(final boolean kerberos) throws Exception {
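    // "client" may impersonate foo and bar (but not foo1); key ACLs then pin
    // each key to one impersonated user.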
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
if (kerberos) {
conf.set("hadoop.kms.authentication.type", "kerberos");
}
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
conf.set("hadoop.kms.proxyuser.client.users", "foo,bar");
conf.set("hadoop.kms.proxyuser.client.hosts", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kaa.ALL", "client");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kbb.ALL", "foo");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kcc.ALL", "foo1");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kdd.ALL", "bar");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
final URI uri = createKMSUri(getKMSUrl());
UserGroupInformation proxyUgi = null;
if (kerberos) {
// proxyuser client using kerberos credentials
proxyUgi = UserGroupInformation.
loginUserFromKeytabAndReturnUGI("client", keytab.getAbsolutePath());
} else {
proxyUgi = UserGroupInformation.createRemoteUser("client");
}
final UserGroupInformation clientUgi = proxyUgi;
clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
final KeyProvider kp = createProvider(uri, conf);
kp.createKey("kaa", new KeyProvider.Options(conf));
// authorized proxyuser
UserGroupInformation fooUgi =
UserGroupInformation.createProxyUser("foo", clientUgi);
fooUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
Assert.assertNotNull(kp.createKey("kbb",
new KeyProvider.Options(conf)));
return null;
}
});
// unauthorized proxyuser
UserGroupInformation foo1Ugi =
UserGroupInformation.createProxyUser("foo1", clientUgi);
foo1Ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
kp.createKey("kcc", new KeyProvider.Options(conf));
Assert.fail();
} catch (AuthorizationException ex) {
// OK
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
// authorized proxyuser
UserGroupInformation barUgi =
UserGroupInformation.createProxyUser("bar", clientUgi);
barUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
Assert.assertNotNull(kp.createKey("kdd",
new KeyProvider.Options(conf)));
return null;
}
});
return null;
}
});
return null;
}
});
}
@Test
public void testWebHDFSProxyUserKerb() throws Exception {
doWebHDFSProxyUserTest(true);
}
@Test
public void testWebHDFSProxyUserSimple() throws Exception {
doWebHDFSProxyUserTest(false);
}
public void doWebHDFSProxyUserTest(final boolean kerberos) throws Exception {
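    // Same proxy-user matrix as doProxyUserTest, but each impersonated UGI
    // creates its own provider inside its doAs() block.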
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
if (kerberos) {
conf.set("hadoop.kms.authentication.type", "kerberos");
}
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
conf.set("hadoop.security.kms.client.timeout", "300");
conf.set("hadoop.kms.proxyuser.client.users", "foo,bar");
conf.set("hadoop.kms.proxyuser.client.hosts", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kaa.ALL", "foo");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kbb.ALL", "foo1");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kcc.ALL", "bar");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
final URI uri = createKMSUri(getKMSUrl());
UserGroupInformation proxyUgi = null;
if (kerberos) {
// proxyuser client using kerberos credentials
proxyUgi = UserGroupInformation.
loginUserFromKeytabAndReturnUGI("client", keytab.getAbsolutePath());
} else {
proxyUgi = UserGroupInformation.createRemoteUser("client");
}
final UserGroupInformation clientUgi = proxyUgi;
clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
// authorized proxyuser
UserGroupInformation fooUgi =
UserGroupInformation.createProxyUser("foo", clientUgi);
fooUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
Assert.assertNotNull(kp.createKey("kaa",
new KeyProvider.Options(conf)));
return null;
}
});
// unauthorized proxyuser
UserGroupInformation foo1Ugi =
UserGroupInformation.createProxyUser("foo1", clientUgi);
foo1Ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
kp.createKey("kbb", new KeyProvider.Options(conf));
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage(), ex.getMessage().contains("Forbidden"));
}
return null;
}
});
// authorized proxyuser
UserGroupInformation barUgi =
UserGroupInformation.createProxyUser("bar", clientUgi);
barUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
Assert.assertNotNull(kp.createKey("kcc",
new KeyProvider.Options(conf)));
return null;
}
});
return null;
}
});
return null;
}
});
}
}
| 76,247 | 36.783944 | 125 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSACLs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Assert;
import org.junit.Test;
public class TestKMSACLs {
@Test
public void testDefaults() {
final KMSACLs acls = new KMSACLs(new Configuration(false));
for (KMSACLs.Type type : KMSACLs.Type.values()) {
Assert.assertTrue(acls.hasAccess(type,
UserGroupInformation.createRemoteUser("foo")));
}
}
@Test
public void testCustom() {
final Configuration conf = new Configuration(false);
for (KMSACLs.Type type : KMSACLs.Type.values()) {
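      // Trailing whitespace in the ACL value should be ignored by parsing.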
conf.set(type.getAclConfigKey(), type.toString() + " ");
}
final KMSACLs acls = new KMSACLs(conf);
for (KMSACLs.Type type : KMSACLs.Type.values()) {
Assert.assertTrue(acls.hasAccess(type,
UserGroupInformation.createRemoteUser(type.toString())));
Assert.assertFalse(acls.hasAccess(type,
UserGroupInformation.createRemoteUser("foo")));
}
}
@Test
public void testKeyAclConfigurationLoad() {
final Configuration conf = new Configuration(false);
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "test_key_1.MANAGEMENT", "CREATE");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "test_key_2.ALL", "CREATE");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "test_key_3.NONEXISTOPERATION", "CREATE");
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT", "ROLLOVER");
conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT", "DECRYPT_EEK");
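    // test_key_3 names an operation that is not a KeyOpType, so that entry
    // is expected to be dropped, leaving two parsed key ACLs.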
final KMSACLs acls = new KMSACLs(conf);
Assert.assertTrue("expected key ACL size is 2 but got " + acls.keyAcls.size(),
acls.keyAcls.size() == 2);
}
}
| 2,558 | 38.984375 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import java.io.ByteArrayOutputStream;
import java.io.FilterOutputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.LogManager;
import org.apache.log4j.PropertyConfigurator;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
public class TestKMSAudit {
private PrintStream originalOut;
private ByteArrayOutputStream memOut;
private FilterOut filterOut;
private PrintStream capturedOut;
private KMSAudit kmsAudit;
private static class FilterOut extends FilterOutputStream {
public FilterOut(OutputStream out) {
super(out);
}
public void setOutputStream(OutputStream out) {
this.out = out;
}
}
@Before
public void setUp() {
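    // The test log4j config routes the kms-audit logger to stderr, so swap
    // System.err for a capturing stream.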
originalOut = System.err;
memOut = new ByteArrayOutputStream();
filterOut = new FilterOut(memOut);
capturedOut = new PrintStream(filterOut);
System.setErr(capturedOut);
    PropertyConfigurator.configure(Thread.currentThread()
        .getContextClassLoader()
        .getResourceAsStream("log4j-kmsaudit.properties"));
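    // 1000 ms aggregation window for duplicate audit events.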
this.kmsAudit = new KMSAudit(1000);
}
@After
public void cleanUp() {
System.setErr(originalOut);
LogManager.resetConfiguration();
kmsAudit.shutdown();
}
private String getAndResetLogOutput() {
capturedOut.flush();
String logOutput = new String(memOut.toByteArray());
memOut = new ByteArrayOutputStream();
filterOut.setOutputStream(memOut);
return logOutput;
}
@Test
public void testAggregation() throws Exception {
UserGroupInformation luser = Mockito.mock(UserGroupInformation.class);
Mockito.when(luser.getShortUserName()).thenReturn("luser");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DELETE_KEY, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.ROLL_NEW_VERSION, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
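    // Sleep past the 1 s window so the aggregated accesses are flushed.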
Thread.sleep(1500);
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
Thread.sleep(1500);
String out = getAndResetLogOutput();
System.out.println(out);
Assert.assertTrue(
out.matches(
"OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"
// Not aggregated !!
+ "OK\\[op=DELETE_KEY, key=k1, user=luser\\] testmsg"
+ "OK\\[op=ROLL_NEW_VERSION, key=k1, user=luser\\] testmsg"
// Aggregated
+ "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=6, interval=[^m]{1,4}ms\\] testmsg"
+ "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"));
}
@Test
public void testAggregationUnauth() throws Exception {
UserGroupInformation luser = Mockito.mock(UserGroupInformation.class);
Mockito.when(luser.getShortUserName()).thenReturn("luser");
kmsAudit.unauthorized(luser, KMSOp.GENERATE_EEK, "k2");
Thread.sleep(1000);
kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.unauthorized(luser, KMSOp.GENERATE_EEK, "k3");
kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
Thread.sleep(2000);
String out = getAndResetLogOutput();
System.out.println(out);
Assert.assertTrue(
out.matches(
"UNAUTHORIZED\\[op=GENERATE_EEK, key=k2, user=luser\\] "
+ "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"
+ "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=5, interval=[^m]{1,4}ms\\] testmsg"
+ "UNAUTHORIZED\\[op=GENERATE_EEK, key=k3, user=luser\\] "
+ "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"));
}
}
| 5,245 | 37.573529 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/EagerKeyGeneratorKeyProviderCryptoExtension.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.security.NoSuchAlgorithmException;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.kms.ValueQueue;
import org.apache.hadoop.crypto.key.kms.ValueQueue.SyncGenerationPolicy;
/**
* A {@link KeyProviderCryptoExtension} that pre-generates and caches encrypted
* keys.
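 * <p>
 * A rough usage sketch (illustrative only; {@code underlying} stands in for
 * whatever <code>KeyProviderCryptoExtension</code> backs the KMS):
 * <pre>
 *   KeyProviderCryptoExtension eager =
 *       new EagerKeyGeneratorKeyProviderCryptoExtension(conf, underlying);
 *   // Served from the pre-filled per-key queue, which is refilled
 *   // asynchronously once it drops below the low watermark.
 *   EncryptedKeyVersion edek = eager.generateEncryptedKey("mykey");
 * </pre>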
*/
@InterfaceAudience.Private
public class EagerKeyGeneratorKeyProviderCryptoExtension
extends KeyProviderCryptoExtension {
private static final String KEY_CACHE_PREFIX =
"hadoop.security.kms.encrypted.key.cache.";
public static final String KMS_KEY_CACHE_SIZE =
KEY_CACHE_PREFIX + "size";
public static final int KMS_KEY_CACHE_SIZE_DEFAULT = 100;
public static final String KMS_KEY_CACHE_LOW_WATERMARK =
KEY_CACHE_PREFIX + "low.watermark";
public static final float KMS_KEY_CACHE_LOW_WATERMARK_DEFAULT = 0.30f;
public static final String KMS_KEY_CACHE_EXPIRY_MS =
KEY_CACHE_PREFIX + "expiry";
public static final int KMS_KEY_CACHE_EXPIRY_DEFAULT = 43200000;
public static final String KMS_KEY_CACHE_NUM_REFILL_THREADS =
KEY_CACHE_PREFIX + "num.fill.threads";
public static final int KMS_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT = 2;
private static class CryptoExtension
implements KeyProviderCryptoExtension.CryptoExtension {
private class EncryptedQueueRefiller implements
ValueQueue.QueueRefiller<EncryptedKeyVersion> {
@Override
public void fillQueueForKey(String keyName,
Queue<EncryptedKeyVersion> keyQueue, int numKeys) throws IOException {
List<EncryptedKeyVersion> retEdeks =
new LinkedList<EncryptedKeyVersion>();
for (int i = 0; i < numKeys; i++) {
try {
retEdeks.add(keyProviderCryptoExtension.generateEncryptedKey(
keyName));
} catch (GeneralSecurityException e) {
throw new IOException(e);
}
}
keyQueue.addAll(retEdeks);
}
}
private KeyProviderCryptoExtension keyProviderCryptoExtension;
private final ValueQueue<EncryptedKeyVersion> encKeyVersionQueue;
public CryptoExtension(Configuration conf,
KeyProviderCryptoExtension keyProviderCryptoExtension) {
this.keyProviderCryptoExtension = keyProviderCryptoExtension;
encKeyVersionQueue =
new ValueQueue<KeyProviderCryptoExtension.EncryptedKeyVersion>(
conf.getInt(KMS_KEY_CACHE_SIZE,
KMS_KEY_CACHE_SIZE_DEFAULT),
conf.getFloat(KMS_KEY_CACHE_LOW_WATERMARK,
KMS_KEY_CACHE_LOW_WATERMARK_DEFAULT),
conf.getInt(KMS_KEY_CACHE_EXPIRY_MS,
KMS_KEY_CACHE_EXPIRY_DEFAULT),
conf.getInt(KMS_KEY_CACHE_NUM_REFILL_THREADS,
KMS_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
SyncGenerationPolicy.LOW_WATERMARK, new EncryptedQueueRefiller()
);
}
@Override
public void warmUpEncryptedKeys(String... keyNames) throws
IOException {
try {
encKeyVersionQueue.initializeQueuesForKeys(keyNames);
} catch (ExecutionException e) {
throw new IOException(e);
}
}
@Override
public void drain(String keyName) {
encKeyVersionQueue.drain(keyName);
}
@Override
public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
throws IOException, GeneralSecurityException {
try {
return encKeyVersionQueue.getNext(encryptionKeyName);
} catch (ExecutionException e) {
throw new IOException(e);
}
}
@Override
public KeyVersion
decryptEncryptedKey(EncryptedKeyVersion encryptedKeyVersion)
throws IOException, GeneralSecurityException {
return keyProviderCryptoExtension.decryptEncryptedKey(
encryptedKeyVersion);
}
}
  /**
   * Creates a proxy for a <code>KeyProviderCryptoExtension</code> that
   * decorates the underlying <code>CryptoExtension</code> with one that
   * eagerly caches pre-generated encrypted keys in a <code>ValueQueue</code>.
   *
   * @param conf Configuration object to load parameters from.
   * @param keyProviderCryptoExtension <code>KeyProviderCryptoExtension</code>
   * to delegate calls to.
   */
public EagerKeyGeneratorKeyProviderCryptoExtension(Configuration conf,
KeyProviderCryptoExtension keyProviderCryptoExtension) {
super(keyProviderCryptoExtension,
new CryptoExtension(conf, keyProviderCryptoExtension));
}
@Override
public KeyVersion rollNewVersion(String name)
throws NoSuchAlgorithmException, IOException {
KeyVersion keyVersion = super.rollNewVersion(name);
getExtension().drain(name);
return keyVersion;
}
@Override
public KeyVersion rollNewVersion(String name, byte[] material)
throws IOException {
KeyVersion keyVersion = super.rollNewVersion(name, material);
getExtension().drain(name);
return keyVersion;
}
}
| 6,250 | 35.343023 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp;
import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider.KeyACLs;
import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider.KeyOpType;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import com.google.common.annotations.VisibleForTesting;
/**
* Provides access to the <code>AccessControlList</code>s used by KMS,
* hot-reloading them if the <code>kms-acls.xml</code> file where the ACLs
* are defined has been updated.
*/
@InterfaceAudience.Private
public class KMSACLs implements Runnable, KeyACLs {
private static final Logger LOG = LoggerFactory.getLogger(KMSACLs.class);
private static final String UNAUTHORIZED_MSG_WITH_KEY =
"User:%s not allowed to do '%s' on '%s'";
private static final String UNAUTHORIZED_MSG_WITHOUT_KEY =
"User:%s not allowed to do '%s'";
public enum Type {
CREATE, DELETE, ROLLOVER, GET, GET_KEYS, GET_METADATA,
SET_KEY_MATERIAL, GENERATE_EEK, DECRYPT_EEK;
public String getAclConfigKey() {
return KMSConfiguration.CONFIG_PREFIX + "acl." + this.toString();
}
public String getBlacklistConfigKey() {
return KMSConfiguration.CONFIG_PREFIX + "blacklist." + this.toString();
}
}
public static final String ACL_DEFAULT = AccessControlList.WILDCARD_ACL_VALUE;
public static final int RELOADER_SLEEP_MILLIS = 1000;
private volatile Map<Type, AccessControlList> acls;
private volatile Map<Type, AccessControlList> blacklistedAcls;
@VisibleForTesting
volatile Map<String, HashMap<KeyOpType, AccessControlList>> keyAcls;
private final Map<KeyOpType, AccessControlList> defaultKeyAcls =
new HashMap<KeyOpType, AccessControlList>();
private final Map<KeyOpType, AccessControlList> whitelistKeyAcls =
new HashMap<KeyOpType, AccessControlList>();
private ScheduledExecutorService executorService;
private long lastReload;
KMSACLs(Configuration conf) {
if (conf == null) {
conf = loadACLs();
}
setKMSACLs(conf);
setKeyACLs(conf);
}
public KMSACLs() {
this(null);
}
private void setKMSACLs(Configuration conf) {
Map<Type, AccessControlList> tempAcls = new HashMap<Type, AccessControlList>();
Map<Type, AccessControlList> tempBlacklist = new HashMap<Type, AccessControlList>();
for (Type aclType : Type.values()) {
String aclStr = conf.get(aclType.getAclConfigKey(), ACL_DEFAULT);
tempAcls.put(aclType, new AccessControlList(aclStr));
String blacklistStr = conf.get(aclType.getBlacklistConfigKey());
if (blacklistStr != null) {
// Only add if blacklist is present
tempBlacklist.put(aclType, new AccessControlList(blacklistStr));
LOG.info("'{}' Blacklist '{}'", aclType, blacklistStr);
}
LOG.info("'{}' ACL '{}'", aclType, aclStr);
}
acls = tempAcls;
blacklistedAcls = tempBlacklist;
}
private void setKeyACLs(Configuration conf) {
Map<String, HashMap<KeyOpType, AccessControlList>> tempKeyAcls =
new HashMap<String, HashMap<KeyOpType,AccessControlList>>();
Map<String, String> allKeyACLS =
conf.getValByRegex(KMSConfiguration.KEY_ACL_PREFIX_REGEX);
for (Map.Entry<String, String> keyAcl : allKeyACLS.entrySet()) {
String k = keyAcl.getKey();
// this should be of type "key.acl.<KEY_NAME>.<OP_TYPE>"
int keyNameStarts = KMSConfiguration.KEY_ACL_PREFIX.length();
int keyNameEnds = k.lastIndexOf(".");
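      // e.g. "key.acl.mykey.DECRYPT_EEK" -> keyName "mykey",
      // keyOp "DECRYPT_EEK".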
if (keyNameStarts >= keyNameEnds) {
LOG.warn("Invalid key name '{}'", k);
} else {
String aclStr = keyAcl.getValue();
String keyName = k.substring(keyNameStarts, keyNameEnds);
String keyOp = k.substring(keyNameEnds + 1);
KeyOpType aclType = null;
try {
aclType = KeyOpType.valueOf(keyOp);
} catch (IllegalArgumentException e) {
LOG.warn("Invalid key Operation '{}'", keyOp);
}
if (aclType != null) {
          // Assumes this runs single threaded; otherwise this would need to
          // be a ConcurrentHashMap.
HashMap<KeyOpType,AccessControlList> aclMap =
tempKeyAcls.get(keyName);
if (aclMap == null) {
aclMap = new HashMap<KeyOpType, AccessControlList>();
tempKeyAcls.put(keyName, aclMap);
}
aclMap.put(aclType, new AccessControlList(aclStr));
LOG.info("KEY_NAME '{}' KEY_OP '{}' ACL '{}'",
keyName, aclType, aclStr);
}
}
}
keyAcls = tempKeyAcls;
for (KeyOpType keyOp : KeyOpType.values()) {
if (!defaultKeyAcls.containsKey(keyOp)) {
String confKey = KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + keyOp;
String aclStr = conf.get(confKey);
if (aclStr != null) {
if (keyOp == KeyOpType.ALL) {
// Ignore All operation for default key acl
LOG.warn("Should not configure default key ACL for KEY_OP '{}'", keyOp);
} else {
if (aclStr.equals("*")) {
LOG.info("Default Key ACL for KEY_OP '{}' is set to '*'", keyOp);
}
defaultKeyAcls.put(keyOp, new AccessControlList(aclStr));
}
}
}
if (!whitelistKeyAcls.containsKey(keyOp)) {
String confKey = KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + keyOp;
String aclStr = conf.get(confKey);
if (aclStr != null) {
if (keyOp == KeyOpType.ALL) {
// Ignore All operation for whitelist key acl
LOG.warn("Should not configure whitelist key ACL for KEY_OP '{}'", keyOp);
} else {
if (aclStr.equals("*")) {
LOG.info("Whitelist Key ACL for KEY_OP '{}' is set to '*'", keyOp);
}
whitelistKeyAcls.put(keyOp, new AccessControlList(aclStr));
}
}
}
}
}
@Override
public void run() {
try {
      if (KMSConfiguration.isACLsFileNewer(lastReload)) {
        // Load the file once and reuse it for both ACL sets, so the two
        // stay consistent and the file is not parsed twice.
        Configuration conf = loadACLs();
        setKMSACLs(conf);
        setKeyACLs(conf);
      }
} catch (Exception ex) {
LOG.warn(
String.format("Could not reload ACLs file: '%s'", ex.toString()), ex);
}
}
public synchronized void startReloader() {
if (executorService == null) {
executorService = Executors.newScheduledThreadPool(1);
executorService.scheduleAtFixedRate(this, RELOADER_SLEEP_MILLIS,
RELOADER_SLEEP_MILLIS, TimeUnit.MILLISECONDS);
}
}
public synchronized void stopReloader() {
if (executorService != null) {
executorService.shutdownNow();
executorService = null;
}
}
private Configuration loadACLs() {
LOG.debug("Loading ACLs file");
lastReload = System.currentTimeMillis();
Configuration conf = KMSConfiguration.getACLsConf();
// triggering the resource loading.
conf.get(Type.CREATE.getAclConfigKey());
return conf;
}
  /**
   * Checks whether the user may perform the KMS operation: the user must be
   * present in the operation's ACL and must not appear in any blacklist
   * configured for that operation.
   * @param type KMS operation
   * @param ugi UserGroupInformation of the user
   * @return true if the user has access
   */
public boolean hasAccess(Type type, UserGroupInformation ugi) {
boolean access = acls.get(type).isUserAllowed(ugi);
if (access) {
AccessControlList blacklist = blacklistedAcls.get(type);
access = (blacklist == null) || !blacklist.isUserInList(ugi);
}
return access;
}
public void assertAccess(KMSACLs.Type aclType,
UserGroupInformation ugi, KMSOp operation, String key)
throws AccessControlException {
if (!KMSWebApp.getACLs().hasAccess(aclType, ugi)) {
KMSWebApp.getUnauthorizedCallsMeter().mark();
KMSWebApp.getKMSAudit().unauthorized(ugi, operation, key);
throw new AuthorizationException(String.format(
(key != null) ? UNAUTHORIZED_MSG_WITH_KEY
: UNAUTHORIZED_MSG_WITHOUT_KEY,
ugi.getShortUserName(), operation, key));
}
}
@Override
public boolean hasAccessToKey(String keyName, UserGroupInformation ugi,
KeyOpType opType) {
return checkKeyAccess(keyName, ugi, opType)
|| checkKeyAccess(whitelistKeyAcls, ugi, opType);
}
private boolean checkKeyAccess(String keyName, UserGroupInformation ugi,
KeyOpType opType) {
Map<KeyOpType, AccessControlList> keyAcl = keyAcls.get(keyName);
if (keyAcl == null) {
// If No key acl defined for this key, check to see if
// there are key defaults configured for this operation
keyAcl = defaultKeyAcls;
}
return checkKeyAccess(keyAcl, ugi, opType);
}
private boolean checkKeyAccess(Map<KeyOpType, AccessControlList> keyAcl,
UserGroupInformation ugi, KeyOpType opType) {
AccessControlList acl = keyAcl.get(opType);
if (acl == null) {
// If no acl is specified for this operation,
// deny access
return false;
} else {
return acl.isUserAllowed(ugi);
}
}
@Override
public boolean isACLPresent(String keyName, KeyOpType opType) {
return (keyAcls.containsKey(keyName)
|| defaultKeyAcls.containsKey(opType)
|| whitelistKeyAcls.containsKey(opType));
}
}
| 10,688 | 35.481229 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
/**
* Class providing the REST bindings, via Jersey, for the KMS.
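 * <p>
 * Illustrative request shapes (paths come from {@link KMSRESTConstants};
 * the "/kms" context and host are deployment specific):
 * <pre>
 *   POST /kms/v1/keys                     (create a key)
 *   GET  /kms/v1/key/{name}/_metadata     (fetch key metadata)
 *   POST /kms/v1/keyversion/{vn}/_eek     (decrypt an encrypted key)
 * </pre>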
*/
@Path(KMSRESTConstants.SERVICE_VERSION)
@InterfaceAudience.Private
public class KMS {
public static enum KMSOp {
CREATE_KEY, DELETE_KEY, ROLL_NEW_VERSION,
GET_KEYS, GET_KEYS_METADATA,
GET_KEY_VERSIONS, GET_METADATA, GET_KEY_VERSION, GET_CURRENT_KEY,
GENERATE_EEK, DECRYPT_EEK
}
private KeyProviderCryptoExtension provider;
private KMSAudit kmsAudit;
public KMS() throws Exception {
provider = KMSWebApp.getKeyProvider();
    kmsAudit = KMSWebApp.getKMSAudit();
}
private void assertAccess(KMSACLs.Type aclType, UserGroupInformation ugi,
KMSOp operation) throws AccessControlException {
KMSWebApp.getACLs().assertAccess(aclType, ugi, operation, null);
}
private void assertAccess(KMSACLs.Type aclType, UserGroupInformation ugi,
KMSOp operation, String key) throws AccessControlException {
KMSWebApp.getACLs().assertAccess(aclType, ugi, operation, key);
}
private static KeyProvider.KeyVersion removeKeyMaterial(
KeyProvider.KeyVersion keyVersion) {
return new KMSClientProvider.KMSKeyVersion(keyVersion.getName(),
keyVersion.getVersionName(), null);
}
private static URI getKeyURI(String name) throws URISyntaxException {
return new URI(KMSRESTConstants.SERVICE_VERSION + "/" +
KMSRESTConstants.KEY_RESOURCE + "/" + name);
}
@POST
@Path(KMSRESTConstants.KEYS_RESOURCE)
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@SuppressWarnings("unchecked")
public Response createKey(Map jsonKey) throws Exception {
KMSWebApp.getAdminCallsMeter().mark();
UserGroupInformation user = HttpUserGroupInformation.get();
final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
final String material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
String description = (String)
jsonKey.get(KMSRESTConstants.DESCRIPTION_FIELD);
Map<String, String> attributes = (Map<String, String>)
jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
if (material != null) {
assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
KMSOp.CREATE_KEY, name);
}
final KeyProvider.Options options = new KeyProvider.Options(
KMSWebApp.getConfiguration());
if (cipher != null) {
options.setCipher(cipher);
}
if (length != 0) {
options.setBitLength(length);
}
options.setDescription(description);
options.setAttributes(attributes);
KeyProvider.KeyVersion keyVersion = user.doAs(
new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
KeyProvider.KeyVersion keyVersion = (material != null)
? provider.createKey(name, Base64.decodeBase64(material), options)
: provider.createKey(name, options);
provider.flush();
return keyVersion;
}
}
);
kmsAudit.ok(user, KMSOp.CREATE_KEY, name, "UserProvidedMaterial:" +
(material != null) + " Description:" + description);
if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
keyVersion = removeKeyMaterial(keyVersion);
}
Map json = KMSServerJSONUtils.toJSON(keyVersion);
String requestURL = KMSMDCFilter.getURL();
int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
requestURL = requestURL.substring(0, idx);
String keyURL = requestURL + KMSRESTConstants.KEY_RESOURCE + "/" + name;
return Response.created(getKeyURI(name)).type(MediaType.APPLICATION_JSON).
header("Location", keyURL).entity(json).build();
}
@DELETE
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
public Response deleteKey(@PathParam("name") final String name)
throws Exception {
KMSWebApp.getAdminCallsMeter().mark();
UserGroupInformation user = HttpUserGroupInformation.get();
assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name);
KMSClientProvider.checkNotEmpty(name, "name");
user.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
provider.deleteKey(name);
provider.flush();
return null;
}
});
kmsAudit.ok(user, KMSOp.DELETE_KEY, name, "");
return Response.ok().build();
}
@POST
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response rolloverKey(@PathParam("name") final String name,
Map jsonMaterial) throws Exception {
KMSWebApp.getAdminCallsMeter().mark();
UserGroupInformation user = HttpUserGroupInformation.get();
assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.ROLL_NEW_VERSION, name);
KMSClientProvider.checkNotEmpty(name, "name");
final String material = (String)
jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD);
if (material != null) {
assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
KMSOp.ROLL_NEW_VERSION, name);
}
KeyProvider.KeyVersion keyVersion = user.doAs(
new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
KeyVersion keyVersion = (material != null)
? provider.rollNewVersion(name, Base64.decodeBase64(material))
: provider.rollNewVersion(name);
provider.flush();
return keyVersion;
}
}
);
kmsAudit.ok(user, KMSOp.ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
(material != null) + " NewVersion:" + keyVersion.getVersionName());
if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
keyVersion = removeKeyMaterial(keyVersion);
}
Map json = KMSServerJSONUtils.toJSON(keyVersion);
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@GET
@Path(KMSRESTConstants.KEYS_METADATA_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response getKeysMetadata(@QueryParam(KMSRESTConstants.KEY)
List<String> keyNamesList) throws Exception {
KMSWebApp.getAdminCallsMeter().mark();
UserGroupInformation user = HttpUserGroupInformation.get();
final String[] keyNames = keyNamesList.toArray(
new String[keyNamesList.size()]);
assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_KEYS_METADATA);
KeyProvider.Metadata[] keysMeta = user.doAs(
new PrivilegedExceptionAction<KeyProvider.Metadata[]>() {
@Override
public KeyProvider.Metadata[] run() throws Exception {
return provider.getKeysMetadata(keyNames);
}
}
);
Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
kmsAudit.ok(user, KMSOp.GET_KEYS_METADATA, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@GET
@Path(KMSRESTConstants.KEYS_NAMES_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response getKeyNames() throws Exception {
KMSWebApp.getAdminCallsMeter().mark();
UserGroupInformation user = HttpUserGroupInformation.get();
assertAccess(KMSACLs.Type.GET_KEYS, user, KMSOp.GET_KEYS);
List<String> json = user.doAs(
new PrivilegedExceptionAction<List<String>>() {
@Override
public List<String> run() throws Exception {
return provider.getKeys();
}
}
);
kmsAudit.ok(user, KMSOp.GET_KEYS, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@GET
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
public Response getKey(@PathParam("name") String name)
throws Exception {
return getMetadata(name);
}
@GET
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
KMSRESTConstants.METADATA_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response getMetadata(@PathParam("name") final String name)
throws Exception {
UserGroupInformation user = HttpUserGroupInformation.get();
KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getAdminCallsMeter().mark();
assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name);
KeyProvider.Metadata metadata = user.doAs(
new PrivilegedExceptionAction<KeyProvider.Metadata>() {
@Override
public KeyProvider.Metadata run() throws Exception {
return provider.getMetadata(name);
}
}
);
Object json = KMSServerJSONUtils.toJSON(name, metadata);
kmsAudit.ok(user, KMSOp.GET_METADATA, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@GET
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
KMSRESTConstants.CURRENT_VERSION_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response getCurrentVersion(@PathParam("name") final String name)
throws Exception {
UserGroupInformation user = HttpUserGroupInformation.get();
KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getKeyCallsMeter().mark();
assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_CURRENT_KEY, name);
KeyVersion keyVersion = user.doAs(
new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
return provider.getCurrentKey(name);
}
}
);
Object json = KMSServerJSONUtils.toJSON(keyVersion);
kmsAudit.ok(user, KMSOp.GET_CURRENT_KEY, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@GET
@Path(KMSRESTConstants.KEY_VERSION_RESOURCE + "/{versionName:.*}")
@Produces(MediaType.APPLICATION_JSON)
public Response getKeyVersion(
@PathParam("versionName") final String versionName) throws Exception {
UserGroupInformation user = HttpUserGroupInformation.get();
KMSClientProvider.checkNotEmpty(versionName, "versionName");
KMSWebApp.getKeyCallsMeter().mark();
assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION);
KeyVersion keyVersion = user.doAs(
new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
return provider.getKeyVersion(versionName);
}
}
);
if (keyVersion != null) {
kmsAudit.ok(user, KMSOp.GET_KEY_VERSION, keyVersion.getName(), "");
}
Object json = KMSServerJSONUtils.toJSON(keyVersion);
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@SuppressWarnings({ "rawtypes", "unchecked" })
@GET
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
KMSRESTConstants.EEK_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response generateEncryptedKeys(
@PathParam("name") final String name,
@QueryParam(KMSRESTConstants.EEK_OP) String edekOp,
@DefaultValue("1")
@QueryParam(KMSRESTConstants.EEK_NUM_KEYS) final int numKeys)
throws Exception {
UserGroupInformation user = HttpUserGroupInformation.get();
KMSClientProvider.checkNotEmpty(name, "name");
KMSClientProvider.checkNotNull(edekOp, "eekOp");
Object retJSON;
if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) {
assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.GENERATE_EEK, name);
final List<EncryptedKeyVersion> retEdeks =
new LinkedList<EncryptedKeyVersion>();
try {
user.doAs(
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
for (int i = 0; i < numKeys; i++) {
retEdeks.add(provider.generateEncryptedKey(name));
}
return null;
}
}
);
} catch (Exception e) {
throw new IOException(e);
}
kmsAudit.ok(user, KMSOp.GENERATE_EEK, name, "");
retJSON = new ArrayList();
for (EncryptedKeyVersion edek : retEdeks) {
((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek));
}
} else {
throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
" value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
KMSRESTConstants.EEK_DECRYPT);
}
KMSWebApp.getGenerateEEKCallsMeter().mark();
return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
.build();
}
@SuppressWarnings("rawtypes")
@POST
@Path(KMSRESTConstants.KEY_VERSION_RESOURCE + "/{versionName:.*}/" +
KMSRESTConstants.EEK_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response decryptEncryptedKey(
@PathParam("versionName") final String versionName,
@QueryParam(KMSRESTConstants.EEK_OP) String eekOp,
Map jsonPayload)
throws Exception {
UserGroupInformation user = HttpUserGroupInformation.get();
KMSClientProvider.checkNotEmpty(versionName, "versionName");
KMSClientProvider.checkNotNull(eekOp, "eekOp");
final String keyName = (String) jsonPayload.get(
KMSRESTConstants.NAME_FIELD);
String ivStr = (String) jsonPayload.get(KMSRESTConstants.IV_FIELD);
String encMaterialStr =
(String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
Object retJSON;
if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK, keyName);
KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
final byte[] iv = Base64.decodeBase64(ivStr);
KMSClientProvider.checkNotNull(encMaterialStr,
KMSRESTConstants.MATERIAL_FIELD);
final byte[] encMaterial = Base64.decodeBase64(encMaterialStr);
KeyProvider.KeyVersion retKeyVersion = user.doAs(
new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
return provider.decryptEncryptedKey(
new KMSClientProvider.KMSEncryptedKeyVersion(keyName,
versionName, iv, KeyProviderCryptoExtension.EEK,
encMaterial)
);
}
}
);
retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, "");
} else {
throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
" value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
KMSRESTConstants.EEK_DECRYPT);
}
KMSWebApp.getDecryptEEKCallsMeter().mark();
return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
.build();
}
@GET
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
KMSRESTConstants.VERSIONS_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response getKeyVersions(@PathParam("name") final String name)
throws Exception {
UserGroupInformation user = HttpUserGroupInformation.get();
KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getKeyCallsMeter().mark();
assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name);
List<KeyVersion> ret = user.doAs(
new PrivilegedExceptionAction<List<KeyVersion>>() {
@Override
public List<KeyVersion> run() throws Exception {
return provider.getKeyVersions(name);
}
}
);
Object json = KMSServerJSONUtils.toJSON(ret);
kmsAudit.ok(user, KMSOp.GET_KEY_VERSIONS, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
}
| 18,150 | 36.57971 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Provides convenience methods for consistently audit-logging the different
 * types of KMS events.
*/
public class KMSAudit {
private static class AuditEvent {
private final AtomicLong accessCount = new AtomicLong(-1);
private final String keyName;
private final String user;
private final KMS.KMSOp op;
private final String extraMsg;
private final long startTime = System.currentTimeMillis();
private AuditEvent(String keyName, String user, KMS.KMSOp op, String msg) {
this.keyName = keyName;
this.user = user;
this.op = op;
this.extraMsg = msg;
}
public String getExtraMsg() {
return extraMsg;
}
public AtomicLong getAccessCount() {
return accessCount;
}
public String getKeyName() {
return keyName;
}
public String getUser() {
return user;
}
public KMS.KMSOp getOp() {
return op;
}
public long getStartTime() {
return startTime;
}
}
public static enum OpStatus {
OK, UNAUTHORIZED, UNAUTHENTICATED, ERROR;
}
  private static final Set<KMS.KMSOp> AGGREGATE_OPS_WHITELIST = Sets.newHashSet(
KMS.KMSOp.GET_KEY_VERSION, KMS.KMSOp.GET_CURRENT_KEY,
KMS.KMSOp.DECRYPT_EEK, KMS.KMSOp.GENERATE_EEK
);
private Cache<String, AuditEvent> cache;
private ScheduledExecutorService executor;
public static final String KMS_LOGGER_NAME = "kms-audit";
  private static final Logger AUDIT_LOG = LoggerFactory.getLogger(KMS_LOGGER_NAME);
/**
* Create a new KMSAudit.
*
* @param windowMs Duplicate events within the aggregation window are quashed
* to reduce log traffic. A single message for aggregated
* events is printed at the end of the window, along with a
* count of the number of aggregated events.
*/
KMSAudit(long windowMs) {
cache = CacheBuilder.newBuilder()
.expireAfterWrite(windowMs, TimeUnit.MILLISECONDS)
.removalListener(
new RemovalListener<String, AuditEvent>() {
@Override
public void onRemoval(
RemovalNotification<String, AuditEvent> entry) {
AuditEvent event = entry.getValue();
if (event.getAccessCount().get() > 0) {
KMSAudit.this.logEvent(event);
event.getAccessCount().set(0);
KMSAudit.this.cache.put(entry.getKey(), event);
}
}
}).build();
executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
.setDaemon(true).setNameFormat(KMS_LOGGER_NAME + "_thread").build());
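    // Guava caches evict lazily, so force cleanUp() periodically to make
    // expired entries fire their removal (flush) notifications on time.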
executor.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
cache.cleanUp();
}
}, windowMs / 10, windowMs / 10, TimeUnit.MILLISECONDS);
}
private void logEvent(AuditEvent event) {
AUDIT_LOG.info(
"OK[op={}, key={}, user={}, accessCount={}, interval={}ms] {}",
event.getOp(), event.getKeyName(), event.getUser(),
event.getAccessCount().get(),
(System.currentTimeMillis() - event.getStartTime()),
event.getExtraMsg());
}
private void op(OpStatus opStatus, final KMS.KMSOp op, final String user,
final String key, final String extraMsg) {
if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key)
&& (op != null)
&& AGGREGATE_OPS_WHITELIST.contains(op)) {
String cacheKey = createCacheKey(user, key, op);
if (opStatus == OpStatus.UNAUTHORIZED) {
cache.invalidate(cacheKey);
AUDIT_LOG.info("UNAUTHORIZED[op={}, key={}, user={}] {}", op, key, user,
extraMsg);
} else {
try {
AuditEvent event = cache.get(cacheKey, new Callable<AuditEvent>() {
@Override
public AuditEvent call() throws Exception {
return new AuditEvent(key, user, op, extraMsg);
}
});
// Log first access (initialized as -1 so
// incrementAndGet() == 0 implies first access)
if (event.getAccessCount().incrementAndGet() == 0) {
event.getAccessCount().incrementAndGet();
logEvent(event);
}
} catch (ExecutionException ex) {
throw new RuntimeException(ex);
}
}
} else {
List<String> kvs = new LinkedList<String>();
if (op != null) {
kvs.add("op=" + op);
}
if (!Strings.isNullOrEmpty(key)) {
kvs.add("key=" + key);
}
if (!Strings.isNullOrEmpty(user)) {
kvs.add("user=" + user);
}
if (kvs.size() == 0) {
AUDIT_LOG.info("{} {}", opStatus.toString(), extraMsg);
} else {
String join = Joiner.on(", ").join(kvs);
AUDIT_LOG.info("{}[{}] {}", opStatus.toString(), join, extraMsg);
}
}
}
public void ok(UserGroupInformation user, KMS.KMSOp op, String key,
String extraMsg) {
op(OpStatus.OK, op, user.getShortUserName(), key, extraMsg);
}
public void ok(UserGroupInformation user, KMS.KMSOp op, String extraMsg) {
op(OpStatus.OK, op, user.getShortUserName(), null, extraMsg);
}
public void unauthorized(UserGroupInformation user, KMS.KMSOp op, String key) {
op(OpStatus.UNAUTHORIZED, op, user.getShortUserName(), key, "");
}
public void error(UserGroupInformation user, String method, String url,
String extraMsg) {
op(OpStatus.ERROR, null, user.getShortUserName(), null, "Method:'" + method
+ "' Exception:'" + extraMsg + "'");
}
public void unauthenticated(String remoteHost, String method,
String url, String extraMsg) {
op(OpStatus.UNAUTHENTICATED, null, null, null, "RemoteHost:"
+ remoteHost + " Method:" + method
+ " URL:" + url + " ErrorMsg:'" + extraMsg + "'");
}
private static String createCacheKey(String user, String key, KMS.KMSOp op) {
return user + "#" + key + "#" + op;
}
public void shutdown() {
executor.shutdownNow();
}
}
| 7,702 | 32.34632 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticationHandler;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpServletResponseWrapper;
import java.io.IOException;
import java.util.Map;
import java.util.Properties;
/**
* Authentication filter that takes the configuration from the KMS configuration
* file.
*/
@InterfaceAudience.Private
public class KMSAuthenticationFilter
extends DelegationTokenAuthenticationFilter {
public static final String CONFIG_PREFIX = KMSConfiguration.CONFIG_PREFIX +
"authentication.";
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties props = new Properties();
Configuration conf = KMSWebApp.getConfiguration();
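    // Strip the prefix while copying, e.g.
    // "hadoop.kms.authentication.type" becomes "type".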
for (Map.Entry<String, String> entry : conf) {
String name = entry.getKey();
if (name.startsWith(CONFIG_PREFIX)) {
String value = conf.get(name);
name = name.substring(CONFIG_PREFIX.length());
props.setProperty(name, value);
}
}
String authType = props.getProperty(AUTH_TYPE);
if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
props.setProperty(AUTH_TYPE,
PseudoDelegationTokenAuthenticationHandler.class.getName());
} else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
props.setProperty(AUTH_TYPE,
KerberosDelegationTokenAuthenticationHandler.class.getName());
}
props.setProperty(DelegationTokenAuthenticationHandler.TOKEN_KIND,
KMSClientProvider.TOKEN_KIND);
return props;
}
protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) {
Map<String, String> proxyuserConf = KMSWebApp.getConfiguration().
getValByRegex("hadoop\\.kms\\.proxyuser\\.");
Configuration conf = new Configuration(false);
for (Map.Entry<String, String> entry : proxyuserConf.entrySet()) {
conf.set(entry.getKey().substring("hadoop.kms.".length()),
entry.getValue());
}
return conf;
}
private static class KMSResponse extends HttpServletResponseWrapper {
public int statusCode;
public String msg;
public KMSResponse(ServletResponse response) {
super((HttpServletResponse)response);
}
@Override
public void setStatus(int sc) {
statusCode = sc;
super.setStatus(sc);
}
@Override
public void sendError(int sc, String msg) throws IOException {
statusCode = sc;
this.msg = msg;
super.sendError(sc, msg);
}
@Override
public void sendError(int sc) throws IOException {
statusCode = sc;
super.sendError(sc);
}
@Override
public void setStatus(int sc, String sm) {
statusCode = sc;
msg = sm;
super.setStatus(sc, sm);
}
}
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain filterChain) throws IOException, ServletException {
KMSResponse kmsResponse = new KMSResponse(response);
super.doFilter(request, kmsResponse, filterChain);
if (kmsResponse.statusCode != HttpServletResponse.SC_OK &&
kmsResponse.statusCode != HttpServletResponse.SC_CREATED &&
kmsResponse.statusCode != HttpServletResponse.SC_UNAUTHORIZED) {
KMSWebApp.getInvalidCallsMeter().mark();
}
    // HttpServletResponse.SC_UNAUTHORIZED means the request did not come
    // from an authenticated user.
if (kmsResponse.statusCode == HttpServletResponse.SC_UNAUTHORIZED) {
KMSWebApp.getUnauthenticatedCallsMeter().mark();
String method = ((HttpServletRequest) request).getMethod();
StringBuffer requestURL = ((HttpServletRequest) request).getRequestURL();
String queryString = ((HttpServletRequest) request).getQueryString();
if (queryString != null) {
requestURL.append("?").append(queryString);
}
KMSWebApp.getKMSAudit().unauthenticated(
request.getRemoteHost(), method, requestURL.toString(),
kmsResponse.msg);
}
}
}
| 5,777 | 36.277419 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;
/**
* Utility class to load KMS configuration files.
*/
@InterfaceAudience.Private
public class KMSConfiguration {
public static final String KMS_CONFIG_DIR = "kms.config.dir";
public static final String KMS_SITE_XML = "kms-site.xml";
public static final String KMS_ACLS_XML = "kms-acls.xml";
public static final String CONFIG_PREFIX = "hadoop.kms.";
public static final String KEY_ACL_PREFIX = "key.acl.";
public static final String KEY_ACL_PREFIX_REGEX = "^key\\.acl\\..+";
public static final String DEFAULT_KEY_ACL_PREFIX = "default.key.acl.";
public static final String WHITELIST_KEY_ACL_PREFIX = "whitelist.key.acl.";
// Property to set the backing KeyProvider
public static final String KEY_PROVIDER_URI = CONFIG_PREFIX +
"key.provider.uri";
// Property to Enable/Disable Caching
public static final String KEY_CACHE_ENABLE = CONFIG_PREFIX +
"cache.enable";
// Timeout for the Key and Metadata Cache
public static final String KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX +
"cache.timeout.ms";
  // Timeout for the Current Key cache
public static final String CURR_KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX +
"current.key.cache.timeout.ms";
// Delay for Audit logs that need aggregation
public static final String KMS_AUDIT_AGGREGATION_WINDOW = CONFIG_PREFIX +
"audit.aggregation.window.ms";
public static final boolean KEY_CACHE_ENABLE_DEFAULT = true;
// 10 mins
public static final long KEY_CACHE_TIMEOUT_DEFAULT = 10 * 60 * 1000;
// 30 secs
public static final long CURR_KEY_CACHE_TIMEOUT_DEFAULT = 30 * 1000;
// 10 secs
public static final long KMS_AUDIT_AGGREGATION_WINDOW_DEFAULT = 10000;
// Property to Enable/Disable per Key authorization
public static final String KEY_AUTHORIZATION_ENABLE = CONFIG_PREFIX +
"key.authorization.enable";
public static final boolean KEY_AUTHORIZATION_ENABLE_DEFAULT = true;
static Configuration getConfiguration(boolean loadHadoopDefaults,
String ... resources) {
Configuration conf = new Configuration(loadHadoopDefaults);
String confDir = System.getProperty(KMS_CONFIG_DIR);
if (confDir != null) {
try {
Path confPath = new Path(confDir);
if (!confPath.isUriPathAbsolute()) {
throw new RuntimeException("System property '" + KMS_CONFIG_DIR +
"' must be an absolute path: " + confDir);
}
for (String resource : resources) {
conf.addResource(new URL("file://" + new Path(confDir, resource).toUri()));
}
} catch (MalformedURLException ex) {
throw new RuntimeException(ex);
}
} else {
for (String resource : resources) {
conf.addResource(resource);
}
}
return conf;
}
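  // Illustrative only: if the JVM were started with, hypothetically,
  //   -Dkms.config.dir=/etc/hadoop-kms/conf
  // the loop above loads file:///etc/hadoop-kms/conf/<resource> for each
  // requested resource; without the system property the resources are
  // loaded from the classpath instead.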
public static Configuration getKMSConf() {
return getConfiguration(true, "core-site.xml", KMS_SITE_XML);
}
public static Configuration getACLsConf() {
return getConfiguration(false, KMS_ACLS_XML);
}
public static boolean isACLsFileNewer(long time) {
boolean newer = false;
String confDir = System.getProperty(KMS_CONFIG_DIR);
if (confDir != null) {
Path confPath = new Path(confDir);
if (!confPath.isUriPathAbsolute()) {
throw new RuntimeException("System property '" + KMS_CONFIG_DIR +
"' must be an absolute path: " + confDir);
}
File f = new File(confDir, KMS_ACLS_XML);
// at least 100ms newer than time, we do this to ensure the file
// has been properly closed/flushed
newer = f.lastModified() - time > 100;
}
return newer;
}
}
| 4,677 | 36.126984 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import com.codahale.metrics.JmxReporter;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.CachingKeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.VersionInfo;
import org.apache.log4j.PropertyConfigurator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.bridge.SLF4JBridgeHandler;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import java.io.File;
import java.net.URI;
import java.net.URL;
import java.util.List;
@InterfaceAudience.Private
public class KMSWebApp implements ServletContextListener {
private static final String LOG4J_PROPERTIES = "kms-log4j.properties";
private static final String METRICS_PREFIX = "hadoop.kms.";
private static final String ADMIN_CALLS_METER = METRICS_PREFIX +
"admin.calls.meter";
private static final String KEY_CALLS_METER = METRICS_PREFIX +
"key.calls.meter";
private static final String INVALID_CALLS_METER = METRICS_PREFIX +
"invalid.calls.meter";
private static final String UNAUTHORIZED_CALLS_METER = METRICS_PREFIX +
"unauthorized.calls.meter";
private static final String UNAUTHENTICATED_CALLS_METER = METRICS_PREFIX +
"unauthenticated.calls.meter";
private static final String GENERATE_EEK_METER = METRICS_PREFIX +
"generate_eek.calls.meter";
private static final String DECRYPT_EEK_METER = METRICS_PREFIX +
"decrypt_eek.calls.meter";
private static Logger LOG;
private static MetricRegistry metricRegistry;
private JmxReporter jmxReporter;
private static Configuration kmsConf;
private static KMSACLs kmsAcls;
private static Meter adminCallsMeter;
private static Meter keyCallsMeter;
private static Meter unauthorizedCallsMeter;
private static Meter unauthenticatedCallsMeter;
private static Meter decryptEEKCallsMeter;
private static Meter generateEEKCallsMeter;
private static Meter invalidCallsMeter;
private static KMSAudit kmsAudit;
private static KeyProviderCryptoExtension keyProviderCryptoExtension;
static {
SLF4JBridgeHandler.removeHandlersForRootLogger();
SLF4JBridgeHandler.install();
}
private void initLogging(String confDir) {
if (System.getProperty("log4j.configuration") == null) {
System.setProperty("log4j.defaultInitOverride", "true");
boolean fromClasspath = true;
File log4jConf = new File(confDir, LOG4J_PROPERTIES).getAbsoluteFile();
if (log4jConf.exists()) {
PropertyConfigurator.configureAndWatch(log4jConf.getPath(), 1000);
fromClasspath = false;
} else {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL log4jUrl = cl.getResource(LOG4J_PROPERTIES);
if (log4jUrl != null) {
PropertyConfigurator.configure(log4jUrl);
}
}
LOG = LoggerFactory.getLogger(KMSWebApp.class);
LOG.debug("KMS log starting");
if (fromClasspath) {
LOG.warn("Log4j configuration file '{}' not found", LOG4J_PROPERTIES);
LOG.warn("Logging with INFO level to standard output");
}
} else {
LOG = LoggerFactory.getLogger(KMSWebApp.class);
}
}
@Override
public void contextInitialized(ServletContextEvent sce) {
try {
String confDir = System.getProperty(KMSConfiguration.KMS_CONFIG_DIR);
if (confDir == null) {
throw new RuntimeException("System property '" +
KMSConfiguration.KMS_CONFIG_DIR + "' not defined");
}
kmsConf = KMSConfiguration.getKMSConf();
initLogging(confDir);
LOG.info("-------------------------------------------------------------");
LOG.info(" Java runtime version : {}", System.getProperty(
"java.runtime.version"));
LOG.info(" KMS Hadoop Version: " + VersionInfo.getVersion());
LOG.info("-------------------------------------------------------------");
kmsAcls = new KMSACLs();
kmsAcls.startReloader();
metricRegistry = new MetricRegistry();
jmxReporter = JmxReporter.forRegistry(metricRegistry).build();
jmxReporter.start();
generateEEKCallsMeter = metricRegistry.register(GENERATE_EEK_METER,
new Meter());
decryptEEKCallsMeter = metricRegistry.register(DECRYPT_EEK_METER,
new Meter());
adminCallsMeter = metricRegistry.register(ADMIN_CALLS_METER, new Meter());
keyCallsMeter = metricRegistry.register(KEY_CALLS_METER, new Meter());
invalidCallsMeter = metricRegistry.register(INVALID_CALLS_METER,
new Meter());
unauthorizedCallsMeter = metricRegistry.register(UNAUTHORIZED_CALLS_METER,
new Meter());
unauthenticatedCallsMeter = metricRegistry.register(
UNAUTHENTICATED_CALLS_METER, new Meter());
kmsAudit =
new KMSAudit(kmsConf.getLong(
KMSConfiguration.KMS_AUDIT_AGGREGATION_WINDOW,
KMSConfiguration.KMS_AUDIT_AGGREGATION_WINDOW_DEFAULT));
      // this is required for the JMXJsonServlet to work properly.
// the JMXJsonServlet is behind the authentication filter,
// thus the '*' ACL.
sce.getServletContext().setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE,
kmsConf);
sce.getServletContext().setAttribute(HttpServer2.ADMINS_ACL,
new AccessControlList(AccessControlList.WILDCARD_ACL_VALUE));
      // initializing the KeyProvider
String providerString = kmsConf.get(KMSConfiguration.KEY_PROVIDER_URI);
if (providerString == null) {
throw new IllegalStateException("No KeyProvider has been defined");
}
KeyProvider keyProvider =
KeyProviderFactory.get(new URI(providerString), kmsConf);
if (kmsConf.getBoolean(KMSConfiguration.KEY_CACHE_ENABLE,
KMSConfiguration.KEY_CACHE_ENABLE_DEFAULT)) {
long keyTimeOutMillis =
kmsConf.getLong(KMSConfiguration.KEY_CACHE_TIMEOUT_KEY,
KMSConfiguration.KEY_CACHE_TIMEOUT_DEFAULT);
long currKeyTimeOutMillis =
kmsConf.getLong(KMSConfiguration.CURR_KEY_CACHE_TIMEOUT_KEY,
KMSConfiguration.CURR_KEY_CACHE_TIMEOUT_DEFAULT);
keyProvider = new CachingKeyProvider(keyProvider, keyTimeOutMillis,
currKeyTimeOutMillis);
}
LOG.info("Initialized KeyProvider " + keyProvider);
keyProviderCryptoExtension = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(keyProvider);
keyProviderCryptoExtension =
new EagerKeyGeneratorKeyProviderCryptoExtension(kmsConf,
keyProviderCryptoExtension);
if (kmsConf.getBoolean(KMSConfiguration.KEY_AUTHORIZATION_ENABLE,
KMSConfiguration.KEY_AUTHORIZATION_ENABLE_DEFAULT)) {
keyProviderCryptoExtension =
new KeyAuthorizationKeyProvider(
keyProviderCryptoExtension, kmsAcls);
}
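      // Net effect of the wiring above, outermost decorator first:
      //   KeyAuthorizationKeyProvider   (when per-key authorization is enabled)
      //    -> EagerKeyGeneratorKeyProviderCryptoExtension
      //     -> CachingKeyProvider       (when key caching is enabled)
      //      -> the backing KeyProvider named by KEY_PROVIDER_URI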
LOG.info("Initialized KeyProviderCryptoExtension "
+ keyProviderCryptoExtension);
final int defaultBitlength = kmsConf
.getInt(KeyProvider.DEFAULT_BITLENGTH_NAME,
KeyProvider.DEFAULT_BITLENGTH);
LOG.info("Default key bitlength is {}", defaultBitlength);
LOG.info("KMS Started");
} catch (Throwable ex) {
System.out.println();
System.out.println("ERROR: Hadoop KMS could not be started");
System.out.println();
System.out.println("REASON: " + ex.toString());
System.out.println();
System.out.println("Stacktrace:");
System.out.println("---------------------------------------------------");
ex.printStackTrace(System.out);
System.out.println("---------------------------------------------------");
System.out.println();
System.exit(1);
}
}
@Override
public void contextDestroyed(ServletContextEvent sce) {
kmsAudit.shutdown();
kmsAcls.stopReloader();
jmxReporter.stop();
jmxReporter.close();
metricRegistry = null;
LOG.info("KMS Stopped");
}
public static Configuration getConfiguration() {
return new Configuration(kmsConf);
}
public static KMSACLs getACLs() {
return kmsAcls;
}
public static Meter getAdminCallsMeter() {
return adminCallsMeter;
}
public static Meter getKeyCallsMeter() {
return keyCallsMeter;
}
public static Meter getInvalidCallsMeter() {
return invalidCallsMeter;
}
public static Meter getGenerateEEKCallsMeter() {
return generateEEKCallsMeter;
}
public static Meter getDecryptEEKCallsMeter() {
return decryptEEKCallsMeter;
}
public static Meter getUnauthorizedCallsMeter() {
return unauthorizedCallsMeter;
}
public static Meter getUnauthenticatedCallsMeter() {
return unauthenticatedCallsMeter;
}
public static KeyProviderCryptoExtension getKeyProvider() {
return keyProviderCryptoExtension;
}
public static KMSAudit getKMSAudit() {
return kmsAudit;
}
}
| 10,180 | 36.707407 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSMDCFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
/**
 * Servlet filter that captures context of the HTTP request to be used in the
* scope of KMS calls on the server side.
*/
@InterfaceAudience.Private
public class KMSMDCFilter implements Filter {
private static class Data {
private UserGroupInformation ugi;
private String method;
private StringBuffer url;
private Data(UserGroupInformation ugi, String method, StringBuffer url) {
this.ugi = ugi;
this.method = method;
this.url = url;
}
}
private static final ThreadLocal<Data> DATA_TL = new ThreadLocal<Data>();
public static UserGroupInformation getUgi() {
return DATA_TL.get().ugi;
}
public static String getMethod() {
return DATA_TL.get().method;
}
public static String getURL() {
return DATA_TL.get().url.toString();
}
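  // The getters above are only meaningful while doFilter below is on the call
  // stack (e.g. KMSExceptionsProvider reads them when auditing an error);
  // outside of a request DATA_TL is unset and DATA_TL.get() returns null.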
@Override
public void init(FilterConfig config) throws ServletException {
}
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain)
throws IOException, ServletException {
try {
DATA_TL.remove();
UserGroupInformation ugi = HttpUserGroupInformation.get();
String method = ((HttpServletRequest) request).getMethod();
StringBuffer requestURL = ((HttpServletRequest) request).getRequestURL();
String queryString = ((HttpServletRequest) request).getQueryString();
if (queryString != null) {
requestURL.append("?").append(queryString);
}
DATA_TL.set(new Data(ugi, method, requestURL));
chain.doFilter(request, response);
} finally {
DATA_TL.remove();
}
}
@Override
public void destroy() {
}
}
| 2,986 | 30.776596 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.codehaus.jackson.map.ObjectMapper;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyWriter;
import javax.ws.rs.ext.Provider;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
import java.nio.charset.Charset;
import java.util.List;
import java.util.Map;
/**
* Jersey provider that converts <code>Map</code>s and <code>List</code>s
* to their JSON representation.
*/
@Provider
@Produces(MediaType.APPLICATION_JSON)
@InterfaceAudience.Private
public class KMSJSONWriter implements MessageBodyWriter<Object> {
@Override
public boolean isWriteable(Class<?> aClass, Type type,
Annotation[] annotations, MediaType mediaType) {
return Map.class.isAssignableFrom(aClass) ||
List.class.isAssignableFrom(aClass);
}
@Override
public long getSize(Object obj, Class<?> aClass, Type type,
Annotation[] annotations, MediaType mediaType) {
return -1;
}
@Override
public void writeTo(Object obj, Class<?> aClass, Type type,
Annotation[] annotations, MediaType mediaType,
MultivaluedMap<String, Object> stringObjectMultivaluedMap,
OutputStream outputStream) throws IOException, WebApplicationException {
Writer writer = new OutputStreamWriter(outputStream, Charset
.forName("UTF-8"));
ObjectMapper jsonMapper = new ObjectMapper();
jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, obj);
}
}
| 2,574 | 34.273973 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJMXServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.jmx.JMXJsonServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
@InterfaceAudience.Private
public class KMSJMXServlet extends JMXJsonServlet {
@Override
protected boolean isInstrumentationAccessAllowed(HttpServletRequest request,
HttpServletResponse response) throws IOException {
return true;
}
}
| 1,336 | 35.135135 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience;
import com.sun.jersey.api.container.ContainerException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
import javax.ws.rs.ext.Provider;
import java.io.IOException;
/**
* Jersey provider that converts KMS exceptions into detailed HTTP errors.
*/
@Provider
@InterfaceAudience.Private
public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
private static Logger LOG =
LoggerFactory.getLogger(KMSExceptionsProvider.class);
private static final String ENTER = System.getProperty("line.separator");
protected Response createResponse(Response.Status status, Throwable ex) {
return HttpExceptionUtils.createJerseyExceptionResponse(status, ex);
}
protected String getOneLineMessage(Throwable exception) {
String message = exception.getMessage();
if (message != null) {
int i = message.indexOf(ENTER);
if (i > -1) {
message = message.substring(0, i);
}
}
return message;
}
/**
* Maps different exceptions thrown by KMS to HTTP status codes.
*/
@Override
public Response toResponse(Exception exception) {
Response.Status status;
boolean doAudit = true;
Throwable throwable = exception;
if (exception instanceof ContainerException) {
throwable = exception.getCause();
}
if (throwable instanceof SecurityException) {
status = Response.Status.FORBIDDEN;
} else if (throwable instanceof AuthenticationException) {
status = Response.Status.FORBIDDEN;
// we don't audit here because we did it already when checking access
doAudit = false;
} else if (throwable instanceof AuthorizationException) {
status = Response.Status.FORBIDDEN;
// we don't audit here because we did it already when checking access
doAudit = false;
} else if (throwable instanceof AccessControlException) {
status = Response.Status.FORBIDDEN;
} else if (exception instanceof IOException) {
status = Response.Status.INTERNAL_SERVER_ERROR;
} else if (exception instanceof UnsupportedOperationException) {
status = Response.Status.BAD_REQUEST;
} else if (exception instanceof IllegalArgumentException) {
status = Response.Status.BAD_REQUEST;
} else {
status = Response.Status.INTERNAL_SERVER_ERROR;
}
if (doAudit) {
KMSWebApp.getKMSAudit().error(KMSMDCFilter.getUgi(),
KMSMDCFilter.getMethod(),
KMSMDCFilter.getURL(), getOneLineMessage(exception));
}
return createResponse(status, throwable);
}
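  // Summary of the mapping above:
  //   SecurityException, AuthenticationException, AuthorizationException,
  //   AccessControlException                  -> 403 FORBIDDEN
  //   IOException                             -> 500 INTERNAL_SERVER_ERROR
  //   UnsupportedOperationException,
  //   IllegalArgumentException                -> 400 BAD_REQUEST
  //   anything else                           -> 500 INTERNAL_SERVER_ERROR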
protected void log(Response.Status status, Throwable ex) {
UserGroupInformation ugi = KMSMDCFilter.getUgi();
String method = KMSMDCFilter.getMethod();
String url = KMSMDCFilter.getURL();
String msg = getOneLineMessage(ex);
LOG.warn("User:'{}' Method:{} URL:{} Response:{}-{}", ugi, method, url,
status, msg, ex);
}
}
| 4,191 | 35.77193 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONReader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.codehaus.jackson.map.ObjectMapper;
import javax.ws.rs.Consumes;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.ext.MessageBodyReader;
import javax.ws.rs.ext.Provider;
import java.io.IOException;
import java.io.InputStream;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
import java.util.Map;
@Provider
@Consumes(MediaType.APPLICATION_JSON)
@InterfaceAudience.Private
public class KMSJSONReader implements MessageBodyReader<Map> {
@Override
public boolean isReadable(Class<?> type, Type genericType,
Annotation[] annotations, MediaType mediaType) {
return type.isAssignableFrom(Map.class);
}
@Override
public Map readFrom(Class<Map> type, Type genericType,
Annotation[] annotations, MediaType mediaType,
MultivaluedMap<String, String> httpHeaders, InputStream entityStream)
throws IOException, WebApplicationException {
ObjectMapper mapper = new ObjectMapper();
return mapper.readValue(entityStream, type);
}
}
| 2,004 | 35.454545 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
/**
 * A {@link KeyProvider} proxy that checks whether the current user, derived via
 * {@link UserGroupInformation}, is authorized to perform the following
 * types of operations on a Key:
* <ol>
* <li>MANAGEMENT operations : createKey, rollNewVersion, deleteKey</li>
* <li>GENERATE_EEK operations : generateEncryptedKey, warmUpEncryptedKeys</li>
* <li>DECRYPT_EEK operation : decryptEncryptedKey</li>
* <li>READ operations : getKeyVersion, getKeyVersions, getMetadata,
* getKeysMetadata, getCurrentKey</li>
* </ol>
 * Listing key names via getKeys() is not access checked.
*/
public class KeyAuthorizationKeyProvider extends KeyProviderCryptoExtension {
public static final String KEY_ACL = "key.acl.";
private static final String KEY_ACL_NAME = KEY_ACL + "name";
public enum KeyOpType {
ALL, READ, MANAGEMENT, GENERATE_EEK, DECRYPT_EEK;
}
/**
* Interface that needs to be implemented by a client of the
* <code>KeyAuthorizationKeyProvider</code>.
*/
public static interface KeyACLs {
/**
* This is called by the KeyProvider to check if the given user is
* authorized to perform the specified operation on the given acl name.
* @param aclName name of the key ACL
* @param ugi User's UserGroupInformation
* @param opType Operation Type
* @return true if user has access to the aclName and opType else false
*/
public boolean hasAccessToKey(String aclName, UserGroupInformation ugi,
KeyOpType opType);
/**
*
* @param aclName ACL name
* @param opType Operation Type
* @return true if AclName exists else false
*/
public boolean isACLPresent(String aclName, KeyOpType opType);
}
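  // Illustrative only, not part of the KMS: a minimal in-memory sketch of the
  // KeyACLs contract above. The real implementation is KMSACLs, which reloads
  // its rules from kms-acls.xml.
  //
  //   class InMemoryKeyACLs implements KeyACLs {
  //     private final Map<String, EnumSet<KeyOpType>> acls = new HashMap<>();
  //     public boolean isACLPresent(String aclName, KeyOpType opType) {
  //       EnumSet<KeyOpType> ops = acls.get(aclName);
  //       return ops != null
  //           && (ops.contains(opType) || ops.contains(KeyOpType.ALL));
  //     }
  //     public boolean hasAccessToKey(String aclName, UserGroupInformation ugi,
  //         KeyOpType opType) {
  //       return isACLPresent(aclName, opType); // a real ACL also matches ugi
  //     }
  //   }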
private final KeyProviderCryptoExtension provider;
private final KeyACLs acls;
private Lock readLock;
private Lock writeLock;
/**
* The constructor takes a {@link KeyProviderCryptoExtension} and an
* implementation of <code>KeyACLs</code>. All calls are delegated to the
* provider keyProvider after authorization check (if required)
* @param keyProvider the key provider
* @param acls the Key ACLs
*/
public KeyAuthorizationKeyProvider(KeyProviderCryptoExtension keyProvider,
KeyACLs acls) {
super(keyProvider, null);
this.provider = keyProvider;
this.acls = acls;
ReadWriteLock lock = new ReentrantReadWriteLock(true);
readLock = lock.readLock();
writeLock = lock.writeLock();
}
// This method first checks if "key.acl.name" attribute is present as an
// attribute in the provider Options. If yes, use the aclName for any
// subsequent access checks, else use the keyName as the aclName and set it
// as the value of the "key.acl.name" in the key's metadata.
private void authorizeCreateKey(String keyName, Options options,
UserGroupInformation ugi) throws IOException{
Preconditions.checkNotNull(ugi, "UserGroupInformation cannot be null");
Map<String, String> attributes = options.getAttributes();
String aclName = attributes.get(KEY_ACL_NAME);
boolean success = false;
if (Strings.isNullOrEmpty(aclName)) {
if (acls.isACLPresent(keyName, KeyOpType.MANAGEMENT)) {
options.setAttributes(ImmutableMap.<String, String> builder()
.putAll(attributes).put(KEY_ACL_NAME, keyName).build());
success =
acls.hasAccessToKey(keyName, ugi, KeyOpType.MANAGEMENT)
|| acls.hasAccessToKey(keyName, ugi, KeyOpType.ALL);
} else {
success = false;
}
} else {
success = acls.isACLPresent(aclName, KeyOpType.MANAGEMENT) &&
(acls.hasAccessToKey(aclName, ugi, KeyOpType.MANAGEMENT)
|| acls.hasAccessToKey(aclName, ugi, KeyOpType.ALL));
}
if (!success)
throw new AuthorizationException(String.format("User [%s] is not"
+ " authorized to create key !!", ugi.getShortUserName()));
}
private void checkAccess(String aclName, UserGroupInformation ugi,
KeyOpType opType) throws AuthorizationException {
Preconditions.checkNotNull(aclName, "Key ACL name cannot be null");
Preconditions.checkNotNull(ugi, "UserGroupInformation cannot be null");
if (acls.isACLPresent(aclName, opType) &&
(acls.hasAccessToKey(aclName, ugi, opType)
|| acls.hasAccessToKey(aclName, ugi, KeyOpType.ALL))) {
return;
} else {
throw new AuthorizationException(String.format("User [%s] is not"
+ " authorized to perform [%s] on key with ACL name [%s]!!",
ugi.getShortUserName(), opType, aclName));
}
}
@Override
public KeyVersion createKey(String name, Options options)
throws NoSuchAlgorithmException, IOException {
writeLock.lock();
try {
authorizeCreateKey(name, options, getUser());
return provider.createKey(name, options);
} finally {
writeLock.unlock();
}
}
@Override
public KeyVersion createKey(String name, byte[] material, Options options)
throws IOException {
writeLock.lock();
try {
authorizeCreateKey(name, options, getUser());
return provider.createKey(name, material, options);
} finally {
writeLock.unlock();
}
}
@Override
public KeyVersion rollNewVersion(String name)
throws NoSuchAlgorithmException, IOException {
writeLock.lock();
try {
doAccessCheck(name, KeyOpType.MANAGEMENT);
return provider.rollNewVersion(name);
} finally {
writeLock.unlock();
}
}
@Override
public void deleteKey(String name) throws IOException {
writeLock.lock();
try {
doAccessCheck(name, KeyOpType.MANAGEMENT);
provider.deleteKey(name);
} finally {
writeLock.unlock();
}
}
@Override
public KeyVersion rollNewVersion(String name, byte[] material)
throws IOException {
writeLock.lock();
try {
doAccessCheck(name, KeyOpType.MANAGEMENT);
return provider.rollNewVersion(name, material);
} finally {
writeLock.unlock();
}
}
@Override
public void warmUpEncryptedKeys(String... names) throws IOException {
readLock.lock();
try {
for (String name : names) {
doAccessCheck(name, KeyOpType.GENERATE_EEK);
}
provider.warmUpEncryptedKeys(names);
} finally {
readLock.unlock();
}
}
@Override
public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
throws IOException, GeneralSecurityException {
readLock.lock();
try {
doAccessCheck(encryptionKeyName, KeyOpType.GENERATE_EEK);
return provider.generateEncryptedKey(encryptionKeyName);
} finally {
readLock.unlock();
}
}
private void verifyKeyVersionBelongsToKey(EncryptedKeyVersion ekv)
throws IOException {
String kn = ekv.getEncryptionKeyName();
String kvn = ekv.getEncryptionKeyVersionName();
KeyVersion kv = provider.getKeyVersion(kvn);
if (kv == null) {
throw new IllegalArgumentException(String.format(
"'%s' not found", kvn));
}
if (!kv.getName().equals(kn)) {
throw new IllegalArgumentException(String.format(
"KeyVersion '%s' does not belong to the key '%s'", kvn, kn));
}
}
@Override
public KeyVersion decryptEncryptedKey(EncryptedKeyVersion encryptedKeyVersion)
throws IOException, GeneralSecurityException {
readLock.lock();
try {
verifyKeyVersionBelongsToKey(encryptedKeyVersion);
doAccessCheck(
encryptedKeyVersion.getEncryptionKeyName(), KeyOpType.DECRYPT_EEK);
return provider.decryptEncryptedKey(encryptedKeyVersion);
} finally {
readLock.unlock();
}
}
@Override
public KeyVersion getKeyVersion(String versionName) throws IOException {
readLock.lock();
try {
KeyVersion keyVersion = provider.getKeyVersion(versionName);
if (keyVersion != null) {
doAccessCheck(keyVersion.getName(), KeyOpType.READ);
}
return keyVersion;
} finally {
readLock.unlock();
}
}
@Override
public List<String> getKeys() throws IOException {
return provider.getKeys();
}
@Override
public List<KeyVersion> getKeyVersions(String name) throws IOException {
readLock.lock();
try {
doAccessCheck(name, KeyOpType.READ);
return provider.getKeyVersions(name);
} finally {
readLock.unlock();
}
}
@Override
public Metadata getMetadata(String name) throws IOException {
readLock.lock();
try {
doAccessCheck(name, KeyOpType.READ);
return provider.getMetadata(name);
} finally {
readLock.unlock();
}
}
@Override
public Metadata[] getKeysMetadata(String... names) throws IOException {
readLock.lock();
try {
for (String name : names) {
doAccessCheck(name, KeyOpType.READ);
}
return provider.getKeysMetadata(names);
} finally {
readLock.unlock();
}
}
@Override
public KeyVersion getCurrentKey(String name) throws IOException {
readLock.lock();
try {
doAccessCheck(name, KeyOpType.READ);
return provider.getCurrentKey(name);
} finally {
readLock.unlock();
}
}
@Override
public void flush() throws IOException {
provider.flush();
}
@Override
public boolean isTransient() {
return provider.isTransient();
}
private void doAccessCheck(String keyName, KeyOpType opType) throws
IOException {
Metadata metadata = provider.getMetadata(keyName);
if (metadata != null) {
String aclName = metadata.getAttributes().get(KEY_ACL_NAME);
checkAccess((aclName == null) ? keyName : aclName, getUser(), opType);
}
}
private UserGroupInformation getUser() throws IOException {
return UserGroupInformation.getCurrentUser();
}
@Override
protected KeyProvider getKeyProvider() {
return this;
}
@Override
public String toString() {
return provider.toString();
}
}
| 11,514 | 30.461749 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSServerJSONUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
/**
* JSON utility methods for the KMS.
*/
@InterfaceAudience.Private
public class KMSServerJSONUtils {
@SuppressWarnings("unchecked")
public static Map toJSON(KeyProvider.KeyVersion keyVersion) {
Map json = new LinkedHashMap();
if (keyVersion != null) {
json.put(KMSRESTConstants.NAME_FIELD,
keyVersion.getName());
json.put(KMSRESTConstants.VERSION_NAME_FIELD,
keyVersion.getVersionName());
json.put(KMSRESTConstants.MATERIAL_FIELD,
Base64.encodeBase64URLSafeString(
keyVersion.getMaterial()));
}
return json;
}
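  // Illustrative only: for a hypothetical key "k1" the map above serializes as
  //   {"name":"k1","versionName":"k1@0","material":"<URL-safe base64>"}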
@SuppressWarnings("unchecked")
public static List toJSON(List<KeyProvider.KeyVersion> keyVersions) {
List json = new ArrayList();
if (keyVersions != null) {
for (KeyProvider.KeyVersion version : keyVersions) {
json.add(toJSON(version));
}
}
return json;
}
@SuppressWarnings("unchecked")
public static Map toJSON(EncryptedKeyVersion encryptedKeyVersion) {
Map json = new LinkedHashMap();
if (encryptedKeyVersion != null) {
json.put(KMSRESTConstants.VERSION_NAME_FIELD,
encryptedKeyVersion.getEncryptionKeyVersionName());
json.put(KMSRESTConstants.IV_FIELD,
Base64.encodeBase64URLSafeString(
encryptedKeyVersion.getEncryptedKeyIv()));
json.put(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD,
toJSON(encryptedKeyVersion.getEncryptedKeyVersion()));
}
return json;
}
@SuppressWarnings("unchecked")
public static Map toJSON(String keyName, KeyProvider.Metadata meta) {
Map json = new LinkedHashMap();
if (meta != null) {
json.put(KMSRESTConstants.NAME_FIELD, keyName);
json.put(KMSRESTConstants.CIPHER_FIELD, meta.getCipher());
json.put(KMSRESTConstants.LENGTH_FIELD, meta.getBitLength());
json.put(KMSRESTConstants.DESCRIPTION_FIELD, meta.getDescription());
json.put(KMSRESTConstants.ATTRIBUTES_FIELD, meta.getAttributes());
json.put(KMSRESTConstants.CREATED_FIELD,
meta.getCreated().getTime());
json.put(KMSRESTConstants.VERSIONS_FIELD,
(long) meta.getVersions());
}
return json;
}
@SuppressWarnings("unchecked")
public static List toJSON(String[] keyNames, KeyProvider.Metadata[] metas) {
List json = new ArrayList();
for (int i = 0; i < keyNames.length; i++) {
json.add(toJSON(keyNames[i], metas[i]));
}
return json;
}
}
| 3,719 | 35.116505 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/RequestLoggerFilter.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.examples;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpServletResponseWrapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Servlet filter that logs HTTP request/response headers
*/
public class RequestLoggerFilter implements Filter {
private static Logger LOG = LoggerFactory.getLogger(RequestLoggerFilter.class);
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain)
throws IOException, ServletException {
if (!LOG.isDebugEnabled()) {
filterChain.doFilter(request, response);
}
else {
XHttpServletRequest xRequest = new XHttpServletRequest((HttpServletRequest) request);
XHttpServletResponse xResponse = new XHttpServletResponse((HttpServletResponse) response);
try {
        LOG.debug(xRequest.getRequestInfo().toString());
filterChain.doFilter(xRequest, xResponse);
}
finally {
LOG.debug(xResponse.getResponseInfo().toString());
}
}
}
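  // Illustrative only: with debug logging enabled, a hypothetical request
  // produces output along these lines:
  //   > GET http://localhost:8080/who
  //   > User-Agent: curl/7.79.1
  //   >
  //   < status code: 200, message: OK
  //   < Set-Cookie: hadoop.auth=...
  //   <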
@Override
public void destroy() {
}
private static class XHttpServletRequest extends HttpServletRequestWrapper {
public XHttpServletRequest(HttpServletRequest request) {
super(request);
}
    public StringBuffer getRequestInfo() {
StringBuffer sb = new StringBuffer(512);
sb.append("\n").append("> ").append(getMethod()).append(" ").append(getRequestURL());
if (getQueryString() != null) {
sb.append("?").append(getQueryString());
}
sb.append("\n");
Enumeration names = getHeaderNames();
while (names.hasMoreElements()) {
String name = (String) names.nextElement();
Enumeration values = getHeaders(name);
while (values.hasMoreElements()) {
String value = (String) values.nextElement();
sb.append("> ").append(name).append(": ").append(value).append("\n");
}
}
sb.append(">");
return sb;
}
}
private static class XHttpServletResponse extends HttpServletResponseWrapper {
private Map<String, List<String>> headers = new HashMap<String, List<String>>();
private int status;
private String message;
public XHttpServletResponse(HttpServletResponse response) {
super(response);
}
private List<String> getHeaderValues(String name, boolean reset) {
List<String> values = headers.get(name);
if (reset || values == null) {
values = new ArrayList<String>();
headers.put(name, values);
}
return values;
}
@Override
public void addCookie(Cookie cookie) {
super.addCookie(cookie);
List<String> cookies = getHeaderValues("Set-Cookie", false);
cookies.add(cookie.getName() + "=" + cookie.getValue());
}
@Override
public void sendError(int sc, String msg) throws IOException {
super.sendError(sc, msg);
status = sc;
message = msg;
}
@Override
public void sendError(int sc) throws IOException {
super.sendError(sc);
status = sc;
}
@Override
public void setStatus(int sc) {
super.setStatus(sc);
status = sc;
}
@Override
public void setStatus(int sc, String msg) {
super.setStatus(sc, msg);
status = sc;
message = msg;
}
@Override
public void setHeader(String name, String value) {
super.setHeader(name, value);
List<String> values = getHeaderValues(name, true);
values.add(value);
}
@Override
public void addHeader(String name, String value) {
super.addHeader(name, value);
List<String> values = getHeaderValues(name, false);
values.add(value);
}
public StringBuffer getResponseInfo() {
if (status == 0) {
status = 200;
message = "OK";
}
StringBuffer sb = new StringBuffer(512);
sb.append("\n").append("< ").append("status code: ").append(status);
if (message != null) {
sb.append(", message: ").append(message);
}
sb.append("\n");
for (Map.Entry<String, List<String>> entry : headers.entrySet()) {
for (String value : entry.getValue()) {
sb.append("< ").append(entry.getKey()).append(": ").append(value).append("\n");
}
}
sb.append("<");
return sb;
}
}
}
| 5,569 | 29.271739 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoServlet.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.examples;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.Writer;
import java.text.MessageFormat;
/**
* Example servlet that returns the user and principal of the request.
*/
public class WhoServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
resp.setContentType("text/plain");
resp.setStatus(HttpServletResponse.SC_OK);
String user = req.getRemoteUser();
String principal = (req.getUserPrincipal() != null) ? req.getUserPrincipal().getName() : null;
Writer writer = resp.getWriter();
writer.write(MessageFormat.format("You are: user[{0}] principal[{1}]\n", user, principal));
}
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
doGet(req, resp);
}
}
| 1,674 | 37.068182 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.examples;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.Charset;
/**
* Example that uses <code>AuthenticatedURL</code>.
*/
public class WhoClient {
public static void main(String[] args) {
try {
if (args.length != 1) {
System.err.println("Usage: <URL>");
System.exit(-1);
}
AuthenticatedURL.Token token = new AuthenticatedURL.Token();
URL url = new URL(args[0]);
HttpURLConnection conn = new AuthenticatedURL().openConnection(url, token);
System.out.println();
System.out.println("Token value: " + token);
System.out.println("Status code: " + conn.getResponseCode() + " " + conn.getResponseMessage());
System.out.println();
if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
BufferedReader reader = new BufferedReader(
new InputStreamReader(
conn.getInputStream(), Charset.forName("UTF-8")));
String line = reader.readLine();
while (line != null) {
System.out.println(line);
line = reader.readLine();
}
reader.close();
}
System.out.println();
}
catch (Exception ex) {
System.err.println("ERROR: " + ex.getMessage());
System.exit(-1);
}
}
}
| 2,077 | 33.065574 | 101 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.minikdc;
import org.apache.directory.server.kerberos.shared.keytab.Keytab;
import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry;
import org.junit.Assert;
import org.junit.Test;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import java.io.File;
import java.security.Principal;
import java.util.Set;
import java.util.Map;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Arrays;
public class TestMiniKdc extends KerberosSecurityTestcase {
private static final boolean IBM_JAVA = System.getProperty("java.vendor")
.contains("IBM");
@Test
public void testMiniKdcStart() {
MiniKdc kdc = getKdc();
Assert.assertNotSame(0, kdc.getPort());
}
@Test
public void testKeytabGen() throws Exception {
MiniKdc kdc = getKdc();
File workDir = getWorkDir();
kdc.createPrincipal(new File(workDir, "keytab"), "foo/bar", "bar/foo");
Keytab kt = Keytab.read(new File(workDir, "keytab"));
Set<String> principals = new HashSet<String>();
for (KeytabEntry entry : kt.getEntries()) {
principals.add(entry.getPrincipalName());
}
    // Here the principals use \ instead of /, because
    // org.apache.directory.server.kerberos.shared.keytab.KeytabDecoder
    // .getPrincipalName(IoBuffer buffer) uses \\ when generating the principal.
Assert.assertEquals(new HashSet<String>(Arrays.asList(
"foo\\bar@" + kdc.getRealm(), "bar\\foo@" + kdc.getRealm())),
principals);
}
private static class KerberosConfiguration extends Configuration {
private String principal;
private String keytab;
private boolean isInitiator;
private KerberosConfiguration(String principal, File keytab,
boolean client) {
this.principal = principal;
this.keytab = keytab.getAbsolutePath();
this.isInitiator = client;
}
public static Configuration createClientConfig(String principal,
File keytab) {
return new KerberosConfiguration(principal, keytab, true);
}
public static Configuration createServerConfig(String principal,
File keytab) {
return new KerberosConfiguration(principal, keytab, false);
}
private static String getKrb5LoginModuleName() {
return System.getProperty("java.vendor").contains("IBM")
? "com.ibm.security.auth.module.Krb5LoginModule"
: "com.sun.security.auth.module.Krb5LoginModule";
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
Map<String, String> options = new HashMap<String, String>();
options.put("principal", principal);
options.put("refreshKrb5Config", "true");
if (IBM_JAVA) {
options.put("useKeytab", keytab);
options.put("credsType", "both");
} else {
options.put("keyTab", keytab);
options.put("useKeyTab", "true");
options.put("storeKey", "true");
options.put("doNotPrompt", "true");
options.put("useTicketCache", "true");
options.put("renewTGT", "true");
options.put("isInitiator", Boolean.toString(isInitiator));
}
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
options.put("ticketCache", ticketCache);
}
options.put("debug", "true");
return new AppConfigurationEntry[]{
new AppConfigurationEntry(getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options)};
}
}
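  // Illustrative only: on a non-IBM JVM the entry built above is roughly
  // equivalent to this hypothetical jaas.conf stanza (client side):
  //   Client {
  //     com.sun.security.auth.module.Krb5LoginModule required
  //       principal="foo" keyTab="/work/foo.keytab" useKeyTab=true
  //       storeKey=true doNotPrompt=true useTicketCache=true renewTGT=true
  //       isInitiator=true debug=true;
  //   };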
@Test
public void testKerberosLogin() throws Exception {
MiniKdc kdc = getKdc();
File workDir = getWorkDir();
LoginContext loginContext = null;
try {
String principal = "foo";
File keytab = new File(workDir, "foo.keytab");
kdc.createPrincipal(keytab, principal);
Set<Principal> principals = new HashSet<Principal>();
principals.add(new KerberosPrincipal(principal));
//client login
Subject subject = new Subject(false, principals, new HashSet<Object>(),
new HashSet<Object>());
loginContext = new LoginContext("", subject, null,
KerberosConfiguration.createClientConfig(principal, keytab));
loginContext.login();
subject = loginContext.getSubject();
Assert.assertEquals(1, subject.getPrincipals().size());
Assert.assertEquals(KerberosPrincipal.class,
subject.getPrincipals().iterator().next().getClass());
Assert.assertEquals(principal + "@" + kdc.getRealm(),
subject.getPrincipals().iterator().next().getName());
loginContext.logout();
//server login
subject = new Subject(false, principals, new HashSet<Object>(),
new HashSet<Object>());
loginContext = new LoginContext("", subject, null,
KerberosConfiguration.createServerConfig(principal, keytab));
loginContext.login();
subject = loginContext.getSubject();
Assert.assertEquals(1, subject.getPrincipals().size());
Assert.assertEquals(KerberosPrincipal.class,
subject.getPrincipals().iterator().next().getClass());
Assert.assertEquals(principal + "@" + kdc.getRealm(),
subject.getPrincipals().iterator().next().getName());
loginContext.logout();
} finally {
if (loginContext != null) {
loginContext.logout();
}
}
}
}
| 6,537 | 36.574713 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestChangeOrgNameAndDomain.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.minikdc;
import java.util.Properties;
public class TestChangeOrgNameAndDomain extends TestMiniKdc {
@Override
public void createMiniKdcConf() {
super.createMiniKdcConf();
Properties properties = getConf();
properties.setProperty(MiniKdc.ORG_NAME, "APACHE");
properties.setProperty(MiniKdc.ORG_DOMAIN, "COM");
}
}
| 1,172 | 34.545455 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.minikdc;
import org.apache.commons.io.Charsets;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.text.StrSubstitutor;
import org.apache.directory.api.ldap.model.schema.SchemaManager;
import org.apache.directory.api.ldap.schemaextractor.SchemaLdifExtractor;
import org.apache.directory.api.ldap.schemaextractor.impl.DefaultSchemaLdifExtractor;
import org.apache.directory.api.ldap.schemaloader.LdifSchemaLoader;
import org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager;
import org.apache.directory.server.constants.ServerDNConstants;
import org.apache.directory.server.core.DefaultDirectoryService;
import org.apache.directory.server.core.api.CacheService;
import org.apache.directory.server.core.api.DirectoryService;
import org.apache.directory.server.core.api.InstanceLayout;
import org.apache.directory.server.core.api.schema.SchemaPartition;
import org.apache.directory.server.core.kerberos.KeyDerivationInterceptor;
import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmIndex;
import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmPartition;
import org.apache.directory.server.core.partition.ldif.LdifPartition;
import org.apache.directory.server.kerberos.KerberosConfig;
import org.apache.directory.server.kerberos.kdc.KdcServer;
import org.apache.directory.server.kerberos.shared.crypto.encryption.KerberosKeyFactory;
import org.apache.directory.server.kerberos.shared.keytab.Keytab;
import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry;
import org.apache.directory.server.protocol.shared.transport.TcpTransport;
import org.apache.directory.server.protocol.shared.transport.UdpTransport;
import org.apache.directory.server.xdbm.Index;
import org.apache.directory.shared.kerberos.KerberosTime;
import org.apache.directory.shared.kerberos.codec.types.EncryptionType;
import org.apache.directory.shared.kerberos.components.EncryptionKey;
import org.apache.directory.api.ldap.model.entry.DefaultEntry;
import org.apache.directory.api.ldap.model.entry.Entry;
import org.apache.directory.api.ldap.model.ldif.LdifEntry;
import org.apache.directory.api.ldap.model.ldif.LdifReader;
import org.apache.directory.api.ldap.model.name.Dn;
import org.apache.directory.api.ldap.model.schema.registries.SchemaLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.lang.reflect.Method;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
/**
* Mini KDC based on Apache Directory Server that can be embedded in testcases
* or used from command line as a standalone KDC.
* <p>
* <b>From within testcases:</b>
* <p>
* MiniKdc sets 2 System properties when started and un-sets them when stopped:
* <ul>
 * <li>java.security.krb5.conf: set to the MiniKDC realm/host/port</li>
* <li>sun.security.krb5.debug: set to the debug value provided in the
* configuration</li>
* </ul>
 * Because of this, multiple MiniKdc instances cannot be started in parallel
 * (for example, by testcases running in parallel that each start their own
 * KDC). Instead, a single MiniKdc should be shared by all testcases running
 * in parallel.
* <p>
* MiniKdc default configuration values are:
* <ul>
* <li>org.name=EXAMPLE (used to create the REALM)</li>
* <li>org.domain=COM (used to create the REALM)</li>
* <li>kdc.bind.address=localhost</li>
* <li>kdc.port=0 (ephemeral port)</li>
* <li>instance=DefaultKrbServer</li>
* <li>max.ticket.lifetime=86400000 (1 day)</li>
* <li>max.renewable.lifetime=604800000 (7 days)</li>
* <li>transport=TCP</li>
* <li>debug=false</li>
* </ul>
* The generated krb5.conf forces TCP connections.
*/
public class MiniKdc {
public static final String JAVA_SECURITY_KRB5_CONF =
"java.security.krb5.conf";
public static final String SUN_SECURITY_KRB5_DEBUG =
"sun.security.krb5.debug";
public static void main(String[] args) throws Exception {
if (args.length < 4) {
System.out.println("Arguments: <WORKDIR> <MINIKDCPROPERTIES> " +
"<KEYTABFILE> [<PRINCIPALS>]+");
System.exit(1);
}
File workDir = new File(args[0]);
if (!workDir.exists()) {
throw new RuntimeException("Specified work directory does not exists: "
+ workDir.getAbsolutePath());
}
Properties conf = createConf();
File file = new File(args[1]);
if (!file.exists()) {
throw new RuntimeException("Specified configuration does not exists: "
+ file.getAbsolutePath());
}
Properties userConf = new Properties();
InputStreamReader r = null;
try {
r = new InputStreamReader(new FileInputStream(file), Charsets.UTF_8);
userConf.load(r);
} finally {
if (r != null) {
r.close();
}
}
for (Map.Entry<?, ?> entry : userConf.entrySet()) {
conf.put(entry.getKey(), entry.getValue());
}
final MiniKdc miniKdc = new MiniKdc(conf, workDir);
miniKdc.start();
File krb5conf = new File(workDir, "krb5.conf");
if (miniKdc.getKrb5conf().renameTo(krb5conf)) {
File keytabFile = new File(args[2]).getAbsoluteFile();
String[] principals = new String[args.length - 3];
System.arraycopy(args, 3, principals, 0, args.length - 3);
miniKdc.createPrincipal(keytabFile, principals);
System.out.println();
System.out.println("Standalone MiniKdc Running");
System.out.println("---------------------------------------------------");
System.out.println(" Realm : " + miniKdc.getRealm());
System.out.println(" Running at : " + miniKdc.getHost() + ":" +
        miniKdc.getPort());
System.out.println(" krb5conf : " + krb5conf);
System.out.println();
System.out.println(" created keytab : " + keytabFile);
System.out.println(" with principals : " + Arrays.asList(principals));
System.out.println();
System.out.println(" Do <CTRL-C> or kill <PID> to stop it");
System.out.println("---------------------------------------------------");
System.out.println();
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
miniKdc.stop();
}
});
} else {
throw new RuntimeException("Cannot rename KDC's krb5conf to "
+ krb5conf.getAbsolutePath());
}
}
private static final Logger LOG = LoggerFactory.getLogger(MiniKdc.class);
public static final String ORG_NAME = "org.name";
public static final String ORG_DOMAIN = "org.domain";
public static final String KDC_BIND_ADDRESS = "kdc.bind.address";
public static final String KDC_PORT = "kdc.port";
public static final String INSTANCE = "instance";
public static final String MAX_TICKET_LIFETIME = "max.ticket.lifetime";
public static final String MAX_RENEWABLE_LIFETIME = "max.renewable.lifetime";
public static final String TRANSPORT = "transport";
public static final String DEBUG = "debug";
private static final Set<String> PROPERTIES = new HashSet<String>();
private static final Properties DEFAULT_CONFIG = new Properties();
static {
PROPERTIES.add(ORG_NAME);
PROPERTIES.add(ORG_DOMAIN);
    PROPERTIES.add(KDC_BIND_ADDRESS);
PROPERTIES.add(KDC_PORT);
PROPERTIES.add(INSTANCE);
PROPERTIES.add(TRANSPORT);
PROPERTIES.add(MAX_TICKET_LIFETIME);
PROPERTIES.add(MAX_RENEWABLE_LIFETIME);
DEFAULT_CONFIG.setProperty(KDC_BIND_ADDRESS, "localhost");
DEFAULT_CONFIG.setProperty(KDC_PORT, "0");
DEFAULT_CONFIG.setProperty(INSTANCE, "DefaultKrbServer");
DEFAULT_CONFIG.setProperty(ORG_NAME, "EXAMPLE");
DEFAULT_CONFIG.setProperty(ORG_DOMAIN, "COM");
DEFAULT_CONFIG.setProperty(TRANSPORT, "TCP");
DEFAULT_CONFIG.setProperty(MAX_TICKET_LIFETIME, "86400000");
DEFAULT_CONFIG.setProperty(MAX_RENEWABLE_LIFETIME, "604800000");
DEFAULT_CONFIG.setProperty(DEBUG, "false");
}
/**
* Convenience method that returns MiniKdc default configuration.
* <p>
   * The returned configuration is a copy; it can be customized before using
   * it to create a MiniKdc.
* @return a MiniKdc default configuration.
*/
public static Properties createConf() {
return (Properties) DEFAULT_CONFIG.clone();
}
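  // Illustrative lifecycle sketch (not part of MiniKdc; the directory,
  // principal and password below are assumptions of this example):
  //
  //   Properties conf = MiniKdc.createConf();
  //   conf.setProperty(MiniKdc.DEBUG, "true");          // optional tweak
  //   MiniKdc kdc = new MiniKdc(conf, new File("target"));
  //   kdc.start();
  //   try {
  //     kdc.createPrincipal("alice", "alice-password"); // hypothetical user
  //     // ... run Kerberos-dependent test code against kdc.getRealm() ...
  //   } finally {
  //     kdc.stop();
  //   }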
private Properties conf;
private DirectoryService ds;
private KdcServer kdc;
private int port;
private String realm;
private File workDir;
private File krb5conf;
/**
* Creates a MiniKdc.
*
* @param conf MiniKdc configuration.
   * @param workDir working directory; it should be the build directory. Under
   * this directory an ApacheDS working directory will be created; this
   * directory will be deleted when the MiniKdc stops.
* @throws Exception thrown if the MiniKdc could not be created.
*/
public MiniKdc(Properties conf, File workDir) throws Exception {
if (!conf.keySet().containsAll(PROPERTIES)) {
Set<String> missingProperties = new HashSet<String>(PROPERTIES);
missingProperties.removeAll(conf.keySet());
throw new IllegalArgumentException("Missing configuration properties: "
+ missingProperties);
}
this.workDir = new File(workDir, Long.toString(System.currentTimeMillis()));
if (! workDir.exists()
&& ! workDir.mkdirs()) {
throw new RuntimeException("Cannot create directory " + workDir);
}
LOG.info("Configuration:");
LOG.info("---------------------------------------------------------------");
for (Map.Entry<?, ?> entry : conf.entrySet()) {
LOG.info(" {}: {}", entry.getKey(), entry.getValue());
}
LOG.info("---------------------------------------------------------------");
this.conf = conf;
port = Integer.parseInt(conf.getProperty(KDC_PORT));
if (port == 0) {
ServerSocket ss = new ServerSocket(0, 1, InetAddress.getByName
(conf.getProperty(KDC_BIND_ADDRESS)));
port = ss.getLocalPort();
ss.close();
}
    String orgName = conf.getProperty(ORG_NAME);
String orgDomain = conf.getProperty(ORG_DOMAIN);
realm = orgName.toUpperCase(Locale.ENGLISH) + "."
+ orgDomain.toUpperCase(Locale.ENGLISH);
}
/**
* Returns the port of the MiniKdc.
*
* @return the port of the MiniKdc.
*/
public int getPort() {
return port;
}
/**
* Returns the host of the MiniKdc.
*
* @return the host of the MiniKdc.
*/
public String getHost() {
return conf.getProperty(KDC_BIND_ADDRESS);
}
/**
* Returns the realm of the MiniKdc.
*
* @return the realm of the MiniKdc.
*/
public String getRealm() {
return realm;
}
public File getKrb5conf() {
return krb5conf;
}
/**
* Starts the MiniKdc.
*
* @throws Exception thrown if the MiniKdc could not be started.
*/
public synchronized void start() throws Exception {
if (kdc != null) {
throw new RuntimeException("Already started");
}
initDirectoryService();
initKDCServer();
}
private void initDirectoryService() throws Exception {
ds = new DefaultDirectoryService();
ds.setInstanceLayout(new InstanceLayout(workDir));
CacheService cacheService = new CacheService();
ds.setCacheService(cacheService);
// first load the schema
InstanceLayout instanceLayout = ds.getInstanceLayout();
File schemaPartitionDirectory = new File(
instanceLayout.getPartitionsDirectory(), "schema");
SchemaLdifExtractor extractor = new DefaultSchemaLdifExtractor(
instanceLayout.getPartitionsDirectory());
extractor.extractOrCopy();
SchemaLoader loader = new LdifSchemaLoader(schemaPartitionDirectory);
SchemaManager schemaManager = new DefaultSchemaManager(loader);
schemaManager.loadAllEnabled();
ds.setSchemaManager(schemaManager);
// Init the LdifPartition with schema
LdifPartition schemaLdifPartition = new LdifPartition(schemaManager);
schemaLdifPartition.setPartitionPath(schemaPartitionDirectory.toURI());
// The schema partition
SchemaPartition schemaPartition = new SchemaPartition(schemaManager);
schemaPartition.setWrappedPartition(schemaLdifPartition);
ds.setSchemaPartition(schemaPartition);
JdbmPartition systemPartition = new JdbmPartition(ds.getSchemaManager());
systemPartition.setId("system");
systemPartition.setPartitionPath(new File(
ds.getInstanceLayout().getPartitionsDirectory(),
systemPartition.getId()).toURI());
systemPartition.setSuffixDn(new Dn(ServerDNConstants.SYSTEM_DN));
systemPartition.setSchemaManager(ds.getSchemaManager());
ds.setSystemPartition(systemPartition);
ds.getChangeLog().setEnabled(false);
ds.setDenormalizeOpAttrsEnabled(true);
ds.addLast(new KeyDerivationInterceptor());
// create one partition
    String orgName = conf.getProperty(ORG_NAME).toLowerCase(Locale.ENGLISH);
String orgDomain = conf.getProperty(ORG_DOMAIN).toLowerCase(Locale.ENGLISH);
JdbmPartition partition = new JdbmPartition(ds.getSchemaManager());
partition.setId(orgName);
partition.setPartitionPath(new File(
ds.getInstanceLayout().getPartitionsDirectory(), orgName).toURI());
partition.setSuffixDn(new Dn("dc=" + orgName + ",dc=" + orgDomain));
ds.addPartition(partition);
// indexes
Set<Index<?, ?, String>> indexedAttributes = new HashSet<Index<?, ?, String>>();
indexedAttributes.add(new JdbmIndex<String, Entry>("objectClass", false));
indexedAttributes.add(new JdbmIndex<String, Entry>("dc", false));
indexedAttributes.add(new JdbmIndex<String, Entry>("ou", false));
partition.setIndexedAttributes(indexedAttributes);
// And start the ds
ds.setInstanceId(conf.getProperty(INSTANCE));
ds.startup();
// context entry, after ds.startup()
Dn dn = new Dn("dc=" + orgName + ",dc=" + orgDomain);
Entry entry = ds.newEntry(dn);
entry.add("objectClass", "top", "domain");
entry.add("dc", orgName);
ds.getAdminSession().add(entry);
}
private void initKDCServer() throws Exception {
    String orgName = conf.getProperty(ORG_NAME);
String orgDomain = conf.getProperty(ORG_DOMAIN);
String bindAddress = conf.getProperty(KDC_BIND_ADDRESS);
final Map<String, String> map = new HashMap<String, String>();
map.put("0", orgName.toLowerCase(Locale.ENGLISH));
map.put("1", orgDomain.toLowerCase(Locale.ENGLISH));
map.put("2", orgName.toUpperCase(Locale.ENGLISH));
map.put("3", orgDomain.toUpperCase(Locale.ENGLISH));
map.put("4", bindAddress);
ClassLoader cl = Thread.currentThread().getContextClassLoader();
InputStream is1 = cl.getResourceAsStream("minikdc.ldiff");
SchemaManager schemaManager = ds.getSchemaManager();
LdifReader reader = null;
try {
final String content = StrSubstitutor.replace(IOUtils.toString(is1), map);
reader = new LdifReader(new StringReader(content));
for (LdifEntry ldifEntry : reader) {
ds.getAdminSession().add(new DefaultEntry(schemaManager,
ldifEntry.getEntry()));
}
} finally {
IOUtils.closeQuietly(reader);
IOUtils.closeQuietly(is1);
}
KerberosConfig kerberosConfig = new KerberosConfig();
kerberosConfig.setMaximumRenewableLifetime(Long.parseLong(conf
.getProperty(MAX_RENEWABLE_LIFETIME)));
kerberosConfig.setMaximumTicketLifetime(Long.parseLong(conf
.getProperty(MAX_TICKET_LIFETIME)));
kerberosConfig.setSearchBaseDn(String.format("dc=%s,dc=%s", orgName,
orgDomain));
kerberosConfig.setPaEncTimestampRequired(false);
kdc = new KdcServer(kerberosConfig);
kdc.setDirectoryService(ds);
// transport
String transport = conf.getProperty(TRANSPORT);
if (transport.trim().equals("TCP")) {
kdc.addTransports(new TcpTransport(bindAddress, port, 3, 50));
} else if (transport.trim().equals("UDP")) {
kdc.addTransports(new UdpTransport(port));
} else {
throw new IllegalArgumentException("Invalid transport: " + transport);
}
kdc.setServiceName(conf.getProperty(INSTANCE));
kdc.start();
StringBuilder sb = new StringBuilder();
InputStream is2 = cl.getResourceAsStream("minikdc-krb5.conf");
BufferedReader r = null;
try {
r = new BufferedReader(new InputStreamReader(is2, Charsets.UTF_8));
String line = r.readLine();
while (line != null) {
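        // "{3}" is a MessageFormat placeholder; it is substituted below with
        // the platform line separator when krb5.conf is written out.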
sb.append(line).append("{3}");
line = r.readLine();
}
} finally {
IOUtils.closeQuietly(r);
IOUtils.closeQuietly(is2);
}
krb5conf = new File(workDir, "krb5.conf").getAbsoluteFile();
FileUtils.writeStringToFile(krb5conf,
MessageFormat.format(sb.toString(), getRealm(), getHost(),
Integer.toString(getPort()), System.getProperty("line.separator")));
System.setProperty(JAVA_SECURITY_KRB5_CONF, krb5conf.getAbsolutePath());
System.setProperty(SUN_SECURITY_KRB5_DEBUG, conf.getProperty(DEBUG,
"false"));
// refresh the config
Class<?> classRef;
if (System.getProperty("java.vendor").contains("IBM")) {
classRef = Class.forName("com.ibm.security.krb5.internal.Config");
} else {
classRef = Class.forName("sun.security.krb5.Config");
}
Method refreshMethod = classRef.getMethod("refresh", new Class[0]);
refreshMethod.invoke(classRef, new Object[0]);
LOG.info("MiniKdc listening at port: {}", getPort());
LOG.info("MiniKdc setting JVM krb5.conf to: {}",
krb5conf.getAbsolutePath());
}
/**
* Stops the MiniKdc
*/
public synchronized void stop() {
if (kdc != null) {
System.getProperties().remove(JAVA_SECURITY_KRB5_CONF);
System.getProperties().remove(SUN_SECURITY_KRB5_DEBUG);
kdc.stop();
try {
ds.shutdown();
} catch (Exception ex) {
LOG.error("Could not shutdown ApacheDS properly: {}", ex.toString(),
ex);
}
}
delete(workDir);
}
private void delete(File f) {
if (f.isFile()) {
if (! f.delete()) {
LOG.warn("WARNING: cannot delete file " + f.getAbsolutePath());
}
} else {
for (File c: f.listFiles()) {
delete(c);
}
if (! f.delete()) {
LOG.warn("WARNING: cannot delete directory " + f.getAbsolutePath());
}
}
}
/**
* Creates a principal in the KDC with the specified user and password.
*
* @param principal principal name, do not include the domain.
* @param password password.
* @throws Exception thrown if the principal could not be created.
*/
public synchronized void createPrincipal(String principal, String password)
throws Exception {
    String orgName = conf.getProperty(ORG_NAME);
String orgDomain = conf.getProperty(ORG_DOMAIN);
String baseDn = "ou=users,dc=" + orgName.toLowerCase(Locale.ENGLISH)
+ ",dc=" + orgDomain.toLowerCase(Locale.ENGLISH);
String content = "dn: uid=" + principal + "," + baseDn + "\n" +
"objectClass: top\n" +
"objectClass: person\n" +
"objectClass: inetOrgPerson\n" +
"objectClass: krb5principal\n" +
"objectClass: krb5kdcentry\n" +
"cn: " + principal + "\n" +
"sn: " + principal + "\n" +
"uid: " + principal + "\n" +
"userPassword: " + password + "\n" +
"krb5PrincipalName: " + principal + "@" + getRealm() + "\n" +
"krb5KeyVersionNumber: 0";
for (LdifEntry ldifEntry : new LdifReader(new StringReader(content))) {
ds.getAdminSession().add(new DefaultEntry(ds.getSchemaManager(),
ldifEntry.getEntry()));
}
}
/**
* Creates multiple principals in the KDC and adds them to a keytab file.
*
   * @param keytabFile keytab file to add the created principals.
* @param principals principals to add to the KDC, do not include the domain.
* @throws Exception thrown if the principals or the keytab file could not be
* created.
*/
public void createPrincipal(File keytabFile, String ... principals)
throws Exception {
String generatedPassword = UUID.randomUUID().toString();
Keytab keytab = new Keytab();
List<KeytabEntry> entries = new ArrayList<KeytabEntry>();
for (String principal : principals) {
createPrincipal(principal, generatedPassword);
principal = principal + "@" + getRealm();
KerberosTime timestamp = new KerberosTime();
for (Map.Entry<EncryptionType, EncryptionKey> entry : KerberosKeyFactory
.getKerberosKeys(principal, generatedPassword).entrySet()) {
EncryptionKey ekey = entry.getValue();
byte keyVersion = (byte) ekey.getKeyVersion();
entries.add(new KeytabEntry(principal, 1L, timestamp, keyVersion,
ekey));
}
}
keytab.setEntries(entries);
keytab.write(keytabFile);
}
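  // Illustrative sketch (file name and principals are assumptions): a single
  // keytab can hold several principals, e.g. a client plus a SPNEGO server
  // principal for one test:
  //
  //   File keytab = new File(workDir, "spnego.keytab");   // hypothetical path
  //   kdc.createPrincipal(keytab, "client", "HTTP/localhost");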
}
| 22,305 | 37.392427 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/KerberosSecurityTestcase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.minikdc;
import org.junit.After;
import org.junit.Before;
import java.io.File;
import java.util.Properties;
/**
* KerberosSecurityTestcase provides a base class for using MiniKdc with other
 * testcases. KerberosSecurityTestcase starts the MiniKdc (@Before) before
 * running tests, and stops the MiniKdc (@After) after the testcases, using
 * default settings (working dir and kdc configurations).
 * <p>
 * Users can directly inherit this class and implement their own test functions
 * using the default settings, or override the createTestDir() and
 * createMiniKdcConf() methods to provide new settings.
*
*/
public class KerberosSecurityTestcase {
private MiniKdc kdc;
private File workDir;
private Properties conf;
@Before
public void startMiniKdc() throws Exception {
createTestDir();
createMiniKdcConf();
kdc = new MiniKdc(conf, workDir);
kdc.start();
}
/**
   * Create a working directory; it should be the build directory. Under
   * this directory an ApacheDS working directory will be created; this
   * directory will be deleted when the MiniKdc stops.
*/
public void createTestDir() {
workDir = new File(System.getProperty("test.dir", "target"));
}
/**
* Create a Kdc configuration
*/
public void createMiniKdcConf() {
conf = MiniKdc.createConf();
}
@After
public void stopMiniKdc() {
if (kdc != null) {
kdc.stop();
}
}
public MiniKdc getKdc() {
return kdc;
}
public File getWorkDir() {
return workDir;
}
public Properties getConf() {
return conf;
}
}
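// Minimal subclass sketch (illustrative; the class, keytab and principal
// names are assumptions): tests inherit the @Before/@After KDC lifecycle.
//
//   public class MyKerberosTest extends KerberosSecurityTestcase {
//     @Test
//     public void testWithKdc() throws Exception {
//       File keytab = new File(getWorkDir(), "my.keytab");
//       getKdc().createPrincipal(keytab, "myuser");
//       // ... exercise code that authenticates against getKdc().getRealm() ...
//     }
//   }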
| 2,410 | 26.712644 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/aop/org/apache/hadoop/fi/FiConfig.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fi;
import org.apache.hadoop.conf.Configuration;
/**
 * This class wraps the logic around the fault injection configuration file.
 * The default file is expected to be found in src/test/fi-site.xml.
 * This default file should be copied by JUnit Ant's tasks to the
 * build/test/extraconf folder before tests are run.
 * An alternative location can be set through
 * -Dfi.config=<file_name>
*/
public class FiConfig {
private static final String CONFIG_PARAMETER = ProbabilityModel.FPROB_NAME + "config";
private static final String DEFAULT_CONFIG = "fi-site.xml";
private static Configuration conf;
static {
if (conf == null) {
conf = new Configuration(false);
String configName = System.getProperty(CONFIG_PARAMETER, DEFAULT_CONFIG);
conf.addResource(configName);
}
}
/**
* Method provides access to local Configuration
*
* @return Configuration initialized with fault injection's parameters
*/
public static Configuration getConfig() {
return conf;
}
}
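// Illustrative usage (the file and key names are assumptions of this
// example, not fixed values):
//
//   // on the JVM command line: -Dfi.config=my-fi-site.xml
//   Configuration fiConf = FiConfig.getConfig();
//   float prob = fiConf.getFloat("fi.myClass", 0.0f);   // hypothetical key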
| 1,842 | 35.137255 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/aop/org/apache/hadoop/fi/ProbabilityModel.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fi;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
/**
* This class is responsible for the decision of when a fault
* has to be triggered within a class of Hadoop
*
 * The default probability of injection is set to 0%. To change it
 * one can set the system property -Dfi.*=<new probability level>.
 * Another way to do so is to set this level through the FI config file,
 * located under src/test/fi-site.xml.
 *
 * To change the level for a single location one has to specify the following
 * system property at runtime:
 * -Dfi.<name of fault location>=<probability level>
 * The probability level is specified as a float between 0.0 and 1.0.
 *
 * <name of fault location> might be represented by a short classname
 * or otherwise. This decision is left up to the discretion of the aspect
 * developer, but has to be consistent throughout the code.
*/
public class ProbabilityModel {
private static Random generator = new Random();
private static final Log LOG = LogFactory.getLog(ProbabilityModel.class);
static final String FPROB_NAME = "fi.";
private static final String ALL_PROBABILITIES = FPROB_NAME + "*";
private static final float DEFAULT_PROB = 0.00f; //Default probability is 0%
private static final float MAX_PROB = 1.00f; // Max probability is 100%
private static Configuration conf = FiConfig.getConfig();
static {
// Set new default probability if specified through a system.property
// If neither is specified set default probability to DEFAULT_PROB
conf.set(ALL_PROBABILITIES,
System.getProperty(ALL_PROBABILITIES,
conf.get(ALL_PROBABILITIES, Float.toString(DEFAULT_PROB))));
LOG.info(ALL_PROBABILITIES + "=" + conf.get(ALL_PROBABILITIES));
}
/**
* Simplistic method to check if we have reached the point of injection
* @param klassName is the name of the probability level to check.
* If a configuration has been set for "fi.myClass" then you can check if the
* inject criteria has been reached by calling this method with "myClass"
* string as its parameter
* @return true if the probability threshold has been reached; false otherwise
*/
public static boolean injectCriteria(String klassName) {
boolean trigger = false;
if (generator.nextFloat() < getProbability(klassName)) {
trigger = true;
}
return trigger;
}
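  // Illustrative sketch of how an injection point typically consults this
  // model (the fault location name "myClass" and the thrown exception are
  // assumptions of this example):
  //
  //   // enable with: -Dfi.myClass=0.1   (10% injection probability)
  //   if (ProbabilityModel.injectCriteria("myClass")) {
  //     throw new IOException("Injected fault");        // simulated failure
  //   }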
/**
   * This primitive checks for an arbitrary set of desired probabilities. If
   * the level hasn't been set, the method will return the default setting.
   * The probability is expected to be set as a float between 0.0 and 1.0.
   * @param klass is the name of the resource
   * @return float representation of the configured probability level of
   *  the requested resource, or the default value if it hasn't been set
*/
protected static float getProbability(final String klass) {
String newProbName = FPROB_NAME + klass;
String newValue = System.getProperty(newProbName, conf.get(ALL_PROBABILITIES));
if (newValue != null && !newValue.equals(conf.get(newProbName)))
conf.set(newProbName, newValue);
float ret = conf.getFloat(newProbName,
conf.getFloat(ALL_PROBABILITIES, DEFAULT_PROB));
if(LOG.isDebugEnabled()) {
LOG.debug("Request for " + newProbName + " returns=" + ret);
}
// Make sure that probability level is valid.
if (ret < DEFAULT_PROB || ret > MAX_PROB) {
LOG.info("Probability level is incorrect. Default value is set");
ret = conf.getFloat(ALL_PROBABILITIES, DEFAULT_PROB);
}
return ret;
}
}
| 4,436 | 39.706422 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.cli.util.*;
import org.apache.hadoop.cli.util.CommandExecutor.Result;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import java.io.File;
import java.util.ArrayList;
/**
* Tests for the Command Line Interface (CLI)
*/
public class CLITestHelper {
private static final Log LOG =
LogFactory.getLog(CLITestHelper.class.getName());
// In this mode, it runs the command and compares the actual output
// with the expected output
public static final String TESTMODE_TEST = "test"; // Run the tests
// If it is set to nocompare, run the command and do not compare.
  // This can be useful to populate the testConfig.xml file the first time
// a new command is added
public static final String TESTMODE_NOCOMPARE = "nocompare";
public static final String TEST_CACHE_DATA_DIR =
System.getProperty("test.cache.data", "build/test/cache");
//By default, run the tests. The other mode is to run the commands and not
// compare the output
protected String testMode = TESTMODE_TEST;
// Storage for tests read in from the config file
protected ArrayList<CLITestData> testsFromConfigFile = null;
protected ArrayList<ComparatorData> testComparators = null;
protected String thisTestCaseName = null;
protected ComparatorData comparatorData = null;
protected Configuration conf = null;
protected String clitestDataDir = null;
protected String username = null;
/**
* Read the test config file - testConfig.xml
*/
protected void readTestConfigFile() {
String testConfigFile = getTestFile();
if (testsFromConfigFile == null) {
boolean success = false;
testConfigFile = TEST_CACHE_DATA_DIR + File.separator + testConfigFile;
try {
SAXParser p = (SAXParserFactory.newInstance()).newSAXParser();
p.parse(testConfigFile, getConfigParser());
success = true;
} catch (Exception e) {
LOG.info("File: " + testConfigFile + " not found");
success = false;
}
assertTrue("Error reading test config file", success);
}
}
/**
   * Decides which configuration file parser is appropriate for this type
   * of CLI tests.
   * Subclasses need to override the implementation if a parser with additional
   * features is needed; such a subclass also has to provide its own
   * TestConfigFileParser implementation.
   * @return an instance of the TestConfigFileParser class
   */
  protected TestConfigFileParser getConfigParser() {
return new TestConfigFileParser();
}
protected String getTestFile() {
return "";
}
/*
* Setup
*/
public void setUp() throws Exception {
// Read the testConfig.xml file
readTestConfigFile();
conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
true);
clitestDataDir = new File(TEST_CACHE_DATA_DIR).
toURI().toString().replace(' ', '+');
}
/**
* Tear down
*/
public void tearDown() throws Exception {
displayResults();
}
/**
* Expand the commands from the test config xml file
* @param cmd
* @return String expanded command
*/
protected String expandCommand(final String cmd) {
String expCmd = cmd;
expCmd = expCmd.replaceAll("CLITEST_DATA", clitestDataDir);
expCmd = expCmd.replaceAll("USERNAME", username);
return expCmd;
}
/**
* Display the summarized results
*/
private void displayResults() {
LOG.info("Detailed results:");
LOG.info("----------------------------------\n");
for (int i = 0; i < testsFromConfigFile.size(); i++) {
CLITestData td = testsFromConfigFile.get(i);
boolean testResult = td.getTestResult();
// Display the details only if there is a failure
if (!testResult) {
LOG.info("-------------------------------------------");
LOG.info(" Test ID: [" + (i + 1) + "]");
LOG.info(" Test Description: [" + td.getTestDesc() + "]");
LOG.info("");
ArrayList<CLICommand> testCommands = td.getTestCommands();
for (CLICommand cmd : testCommands) {
LOG.info(" Test Commands: [" +
expandCommand(cmd.getCmd()) + "]");
}
LOG.info("");
ArrayList<CLICommand> cleanupCommands = td.getCleanupCommands();
for (CLICommand cmd : cleanupCommands) {
LOG.info(" Cleanup Commands: [" +
expandCommand(cmd.getCmd()) + "]");
}
LOG.info("");
ArrayList<ComparatorData> compdata = td.getComparatorData();
for (ComparatorData cd : compdata) {
boolean resultBoolean = cd.getTestResult();
LOG.info(" Comparator: [" +
cd.getComparatorType() + "]");
LOG.info(" Comparision result: [" +
(resultBoolean ? "pass" : "fail") + "]");
LOG.info(" Expected output: [" +
expandCommand(cd.getExpectedOutput()) + "]");
LOG.info(" Actual output: [" +
cd.getActualOutput() + "]");
}
LOG.info("");
}
}
LOG.info("Summary results:");
LOG.info("----------------------------------\n");
boolean overallResults = true;
int totalPass = 0;
int totalFail = 0;
int totalComparators = 0;
for (int i = 0; i < testsFromConfigFile.size(); i++) {
CLITestData td = testsFromConfigFile.get(i);
totalComparators +=
testsFromConfigFile.get(i).getComparatorData().size();
boolean resultBoolean = td.getTestResult();
if (resultBoolean) {
totalPass ++;
} else {
totalFail ++;
}
overallResults &= resultBoolean;
}
LOG.info(" Testing mode: " + testMode);
LOG.info("");
LOG.info(" Overall result: " +
(overallResults ? "+++ PASS +++" : "--- FAIL ---"));
if ((totalPass + totalFail) == 0) {
LOG.info(" # Tests pass: " + 0);
LOG.info(" # Tests fail: " + 0);
}
else
{
LOG.info(" # Tests pass: " + totalPass +
" (" + (100 * totalPass / (totalPass + totalFail)) + "%)");
LOG.info(" # Tests fail: " + totalFail +
" (" + (100 * totalFail / (totalPass + totalFail)) + "%)");
}
LOG.info(" # Validations done: " + totalComparators +
" (each test may do multiple validations)");
LOG.info("");
LOG.info("Failing tests:");
LOG.info("--------------");
int i = 0;
boolean foundTests = false;
for (i = 0; i < testsFromConfigFile.size(); i++) {
boolean resultBoolean = testsFromConfigFile.get(i).getTestResult();
if (!resultBoolean) {
LOG.info((i + 1) + ": " +
testsFromConfigFile.get(i).getTestDesc());
foundTests = true;
}
}
if (!foundTests) {
LOG.info("NONE");
}
foundTests = false;
LOG.info("");
LOG.info("Passing tests:");
LOG.info("--------------");
for (i = 0; i < testsFromConfigFile.size(); i++) {
boolean resultBoolean = testsFromConfigFile.get(i).getTestResult();
if (resultBoolean) {
LOG.info((i + 1) + ": " +
testsFromConfigFile.get(i).getTestDesc());
foundTests = true;
}
}
if (!foundTests) {
LOG.info("NONE");
}
assertTrue("One of the tests failed. " +
"See the Detailed results to identify " +
"the command that failed", overallResults);
}
/**
* Compare the actual output with the expected output
   * @param compdata the comparator data, including the expected output
   * @param cmdResult the result of the executed command
   * @return true if the actual output matches the expected output
*/
private boolean compareTestOutput(ComparatorData compdata, Result cmdResult) {
// Compare the output based on the comparator
String comparatorType = compdata.getComparatorType();
Class<?> comparatorClass = null;
// If testMode is "test", then run the command and compare the output
// If testMode is "nocompare", then run the command and dump the output.
// Do not compare
boolean compareOutput = false;
if (testMode.equals(TESTMODE_TEST)) {
try {
// Initialize the comparator class and run its compare method
comparatorClass = Class.forName("org.apache.hadoop.cli.util." +
comparatorType);
ComparatorBase comp = (ComparatorBase) comparatorClass.newInstance();
compareOutput = comp.compare(cmdResult.getCommandOutput(),
expandCommand(compdata.getExpectedOutput()));
} catch (Exception e) {
LOG.info("Error in instantiating the comparator" + e);
}
}
return compareOutput;
}
/***********************************
************* TESTS RUNNER
*********************************/
public void testAll() {
assertTrue("Number of tests has to be greater then zero",
testsFromConfigFile.size() > 0);
LOG.info("TestAll");
// Run the tests defined in the testConf.xml config file.
for (int index = 0; index < testsFromConfigFile.size(); index++) {
CLITestData testdata = testsFromConfigFile.get(index);
// Execute the test commands
ArrayList<CLICommand> testCommands = testdata.getTestCommands();
Result cmdResult = null;
for (CLICommand cmd : testCommands) {
try {
cmdResult = execute(cmd);
} catch (Exception e) {
fail(StringUtils.stringifyException(e));
}
}
boolean overallTCResult = true;
// Run comparators
ArrayList<ComparatorData> compdata = testdata.getComparatorData();
for (ComparatorData cd : compdata) {
final String comptype = cd.getComparatorType();
boolean compareOutput = false;
if (! comptype.equalsIgnoreCase("none")) {
compareOutput = compareTestOutput(cd, cmdResult);
overallTCResult &= compareOutput;
}
cd.setExitCode(cmdResult.getExitCode());
cd.setActualOutput(cmdResult.getCommandOutput());
cd.setTestResult(compareOutput);
}
testdata.setTestResult(overallTCResult);
// Execute the cleanup commands
ArrayList<CLICommand> cleanupCommands = testdata.getCleanupCommands();
for (CLICommand cmd : cleanupCommands) {
try {
execute(cmd);
} catch (Exception e) {
fail(StringUtils.stringifyException(e));
}
}
}
}
/**
* this method has to be overridden by an ancestor
*/
protected CommandExecutor.Result execute(CLICommand cmd) throws Exception {
throw new Exception("Unknown type of test command:"+ cmd.getType());
}
/*
* Parser class for the test config xml file
*/
class TestConfigFileParser extends DefaultHandler {
String charString = null;
CLITestData td = null;
ArrayList<CLICommand> testCommands = null;
ArrayList<CLICommand> cleanupCommands = null;
boolean runOnWindows = true;
@Override
public void startDocument() throws SAXException {
testsFromConfigFile = new ArrayList<CLITestData>();
}
@Override
public void startElement(String uri,
String localName,
String qName,
Attributes attributes) throws SAXException {
if (qName.equals("test")) {
td = new CLITestData();
} else if (qName.equals("test-commands")) {
testCommands = new ArrayList<CLICommand>();
} else if (qName.equals("cleanup-commands")) {
cleanupCommands = new ArrayList<CLICommand>();
} else if (qName.equals("comparators")) {
testComparators = new ArrayList<ComparatorData>();
} else if (qName.equals("comparator")) {
comparatorData = new ComparatorData();
}
charString = "";
}
@Override
public void endElement(String uri, String localName,String qName)
throws SAXException {
if (qName.equals("description")) {
td.setTestDesc(charString);
} else if (qName.equals("windows")) {
runOnWindows = Boolean.parseBoolean(charString);
} else if (qName.equals("test-commands")) {
td.setTestCommands(testCommands);
testCommands = null;
} else if (qName.equals("cleanup-commands")) {
td.setCleanupCommands(cleanupCommands);
cleanupCommands = null;
} else if (qName.equals("command")) {
if (testCommands != null) {
testCommands.add(new CLITestCmd(charString, new CLICommandFS()));
} else if (cleanupCommands != null) {
cleanupCommands.add(new CLITestCmd(charString, new CLICommandFS()));
}
} else if (qName.equals("comparators")) {
td.setComparatorData(testComparators);
} else if (qName.equals("comparator")) {
testComparators.add(comparatorData);
} else if (qName.equals("type")) {
comparatorData.setComparatorType(charString);
} else if (qName.equals("expected-output")) {
comparatorData.setExpectedOutput(charString);
} else if (qName.equals("test")) {
if (!Shell.WINDOWS || runOnWindows) {
testsFromConfigFile.add(td);
}
td = null;
runOnWindows = true;
} else if (qName.equals("mode")) {
testMode = charString;
if (!testMode.equals(TESTMODE_NOCOMPARE) &&
!testMode.equals(TESTMODE_TEST)) {
testMode = TESTMODE_TEST;
}
}
}
@Override
public void characters(char[] ch,
int start,
int length) throws SAXException {
String s = new String(ch, start, length);
charString += s;
}
}
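  // For reference, a minimal test definition accepted by the parser above
  // looks roughly like the following (element names are taken from
  // startElement/endElement; the command and expected output are made up):
  //
  //   <test>
  //     <description>ls: nonexistent path</description>
  //     <test-commands>
  //       <command>-fs NAMENODE -ls /nonexistent</command>
  //     </test-commands>
  //     <cleanup-commands>
  //       <command>-fs NAMENODE -rm -r /nonexistent</command>
  //     </cleanup-commands>
  //     <comparators>
  //       <comparator>
  //         <type>RegexpComparator</type>
  //         <expected-output>^ls: .*No such file or directory$</expected-output>
  //       </comparator>
  //     </comparators>
  //   </test>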
}
| 15,051 | 32.598214 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli;
import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CommandExecutor;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Tests for the Command Line Interface (CLI)
*/
public class TestCLI extends CLITestHelper {
@Before
@Override
public void setUp() throws Exception {
super.setUp();
}
@After
@Override
public void tearDown() throws Exception {
super.tearDown();
}
@Override
protected CommandExecutor.Result execute(CLICommand cmd) throws Exception {
return cmd.getExecutor("").executeCommand(cmd.getCmd());
}
@Override
protected String getTestFile() {
return "testConf.xml";
}
@Test
@Override
public void testAll() {
super.testAll();
}
}
| 1,601 | 25.7 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
import java.util.ArrayList;
/**
*
* Class to store CLI Test Data
*/
public class CLITestData {
private String testDesc = null;
private ArrayList<CLICommand> testCommands = null;
private ArrayList<CLICommand> cleanupCommands = null;
private ArrayList<ComparatorData> comparatorData = null;
private boolean testResult = false;
public CLITestData() {
}
/**
* @return the testDesc
*/
public String getTestDesc() {
return testDesc;
}
/**
* @param testDesc the testDesc to set
*/
public void setTestDesc(String testDesc) {
this.testDesc = testDesc;
}
/**
* @return the testCommands
*/
public ArrayList<CLICommand> getTestCommands() {
return testCommands;
}
/**
* @param testCommands the testCommands to set
*/
public void setTestCommands(ArrayList<CLICommand> testCommands) {
this.testCommands = testCommands;
}
/**
* @return the comparatorData
*/
public ArrayList<ComparatorData> getComparatorData() {
return comparatorData;
}
/**
* @param comparatorData the comparatorData to set
*/
public void setComparatorData(ArrayList<ComparatorData> comparatorData) {
this.comparatorData = comparatorData;
}
/**
* @return the testResult
*/
public boolean getTestResult() {
return testResult;
}
/**
* @param testResult the testResult to set
*/
public void setTestResult(boolean testResult) {
this.testResult = testResult;
}
/**
* @return the cleanupCommands
*/
public ArrayList<CLICommand> getCleanupCommands() {
return cleanupCommands;
}
/**
* @param cleanupCommands the cleanupCommands to set
*/
public void setCleanupCommands(ArrayList<CLICommand> cleanupCommands) {
this.cleanupCommands = cleanupCommands;
}
}
| 2,635 | 23.407407 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
/**
 * This interface generalizes the types of test commands for upstream projects
*/
public interface CLICommand {
public CommandExecutor getExecutor(String tag) throws IllegalArgumentException;
public CLICommandTypes getType();
public String getCmd();
@Override
public String toString();
}
| 1,153 | 37.466667 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/RegexpComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Comparator for the Command line tests.
*
 * This comparator searches for the regular expression specified in 'expected'
 * in the string 'actual' and returns true if any line of the actual output
 * matches it
*
*/
public class RegexpComparator extends ComparatorBase {
@Override
public boolean compare(String actual, String expected) {
boolean success = false;
Pattern p = Pattern.compile(expected);
StringTokenizer tokenizer = new StringTokenizer(actual, "\n\r");
while (tokenizer.hasMoreTokens() && !success) {
String actualToken = tokenizer.nextToken();
Matcher m = p.matcher(actualToken);
success = m.matches();
}
return success;
}
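  // Example (illustrative values): with expected = "^Found [0-9]+ items$",
  // compare() returns true as soon as any single line of the actual output,
  // such as "Found 3 items", matches the whole pattern.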
}
| 1,653 | 31.431373 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
import org.apache.hadoop.fs.FsShell;
/**
* Class to define Test Command along with its type
*/
public class CLITestCmd implements CLICommand {
private final CLICommandTypes type;
private final String cmd;
public CLITestCmd(String str, CLICommandTypes type) {
cmd = str;
this.type = type;
}
@Override
public CommandExecutor getExecutor(String tag) throws IllegalArgumentException {
if (getType() instanceof CLICommandFS)
return new FSCmdExecutor(tag, new FsShell());
throw new
IllegalArgumentException("Unknown type of test command: " + getType());
}
@Override
public CLICommandTypes getType() {
return type;
}
@Override
public String getCmd() {
return cmd;
}
@Override
public String toString() {
return cmd;
}
}
| 1,642 | 27.824561 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommandTypes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
/**
 * This interface provides a command type marker for test command enums
*/
public interface CLICommandTypes {
}
| 964 | 37.6 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/ComparatorBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
/**
*
* Comparator interface. To define a new comparator, implement the compare
* method
*/
public abstract class ComparatorBase {
public ComparatorBase() {
}
/**
* Compare method for the comparator class.
* @param actual output. can be null
* @param expected output. can be null
* @return true if expected output compares with the actual output, else
* return false. If actual or expected is null, return false
*/
public abstract boolean compare(String actual, String expected);
}
| 1,373 | 33.35 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/TokenComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
import java.util.StringTokenizer;
/**
* Comparator for the Command line tests.
*
* This comparator compares each token in the expected output and returns true
* if all tokens are in the actual output
*
*/
public class TokenComparator extends ComparatorBase {
@Override
public boolean compare(String actual, String expected) {
boolean compareOutput = true;
StringTokenizer tokenizer = new StringTokenizer(expected, ",\n\r");
while (tokenizer.hasMoreTokens()) {
String token = tokenizer.nextToken();
      compareOutput &= actual.indexOf(token) != -1;
}
return compareOutput;
}
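  // Example (illustrative values): with expected = "Found,items", compare()
  // returns true only if every comma/newline-separated token ("Found" and
  // "items") occurs somewhere in the actual output.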
}
| 1,556 | 30.14 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/ComparatorData.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
/**
 * Class to store the data for the CLI test comparators.
 */
public class ComparatorData {
private String expectedOutput = null;
private String actualOutput = null;
private boolean testResult = false;
private int exitCode = 0;
private String comparatorType = null;
public ComparatorData() {
}
/**
* @return the expectedOutput
*/
public String getExpectedOutput() {
return expectedOutput;
}
/**
* @param expectedOutput the expectedOutput to set
*/
public void setExpectedOutput(String expectedOutput) {
this.expectedOutput = expectedOutput;
}
/**
* @return the actualOutput
*/
public String getActualOutput() {
return actualOutput;
}
/**
* @param actualOutput the actualOutput to set
*/
public void setActualOutput(String actualOutput) {
this.actualOutput = actualOutput;
}
/**
* @return the testResult
*/
public boolean getTestResult() {
return testResult;
}
/**
* @param testResult the testResult to set
*/
public void setTestResult(boolean testResult) {
this.testResult = testResult;
}
/**
* @return the exitCode
*/
public int getExitCode() {
return exitCode;
}
/**
* @param exitCode the exitCode to set
*/
public void setExitCode(int exitCode) {
this.exitCode = exitCode;
}
/**
* @return the comparatorType
*/
public String getComparatorType() {
return comparatorType;
}
/**
* @param comparatorType the comparatorType to set
*/
public void setComparatorType(String comparatorType) {
this.comparatorType = comparatorType;
}
}
| 2,456 | 21.962617 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/FSCmdExecutor.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;
public class FSCmdExecutor extends CommandExecutor {
protected String namenode = null;
protected FsShell shell = null;
public FSCmdExecutor(String namenode, FsShell shell) {
this.namenode = namenode;
this.shell = shell;
}
@Override
  protected void execute(final String cmd) throws Exception {
String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode);
ToolRunner.run(shell, args);
}
}
| 1,345 | 34.421053 | 75 |
java
|
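How the executor is typically driven, as a hedged sketch: the namenode URI below is a placeholder and the demo class is not part of the repository.

package org.apache.hadoop.cli.util;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;

public class FSCmdExecutorDemo {
  public static void main(String[] args) throws Exception {
    FsShell shell = new FsShell(new Configuration());
    // Every "NAMENODE" in the command string is replaced by this URI.
    FSCmdExecutor executor =
        new FSCmdExecutor("hdfs://localhost:8020", shell);
    CommandExecutor.Result result = executor.executeCommand("-ls NAMENODE/");
    System.out.println("exit code: " + result.getExitCode());
    System.out.println(result.getCommandOutput());
  }
}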
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/SubstringComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
public class SubstringComparator extends ComparatorBase {
@Override
  public boolean compare(String actual, String expected) {
    // Null input never matches; otherwise succeed if the expected
    // string occurs anywhere in the actual output.
    return actual != null && expected != null
        && actual.contains(expected);
  }
}
| 1,107 | 31.588235 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/RegexpAcrossOutputComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
import org.apache.hadoop.util.Shell;
import java.util.regex.Pattern;
/**
* Comparator for command line tests that attempts to find a regexp
* within the entire text returned by a command.
*
* This comparator differs from RegexpComparator in that it attempts
* to match the pattern within all of the text returned by the command,
* rather than matching against each line of the returned text. This
* allows matching against patterns that span multiple lines.
*/
public class RegexpAcrossOutputComparator extends ComparatorBase {
@Override
public boolean compare(String actual, String expected) {
if (Shell.WINDOWS) {
actual = actual.replaceAll("\\r", "");
expected = expected.replaceAll("\\r", "");
}
return Pattern.compile(expected).matcher(actual).find();
}
}
| 1,648 | 35.644444 | 75 |
java
|
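A minimal sketch of the across-lines behavior described above: with the (?s) flag the pattern spans a line break, which a line-by-line comparator could never match. The demo class is illustrative only.

package org.apache.hadoop.cli.util;

public class RegexpAcrossOutputDemo {
  public static void main(String[] args) {
    RegexpAcrossOutputComparator cmp = new RegexpAcrossOutputComparator();
    String actual = "Deleted /tmp/file1\nDeleted /tmp/file2\n";
    // (?s) makes '.' match newlines, so the match spans both lines.
    System.out.println(cmp.compare(actual, "(?s)file1.*file2")); // true
  }
}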
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/ExactComparator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
/**
 * Comparator for the command line tests.
 *
 * This comparator returns true only if the actual output is exactly
 * equal to the expected output.
 */
public class ExactComparator extends ComparatorBase {
@Override
public boolean compare(String actual, String expected) {
return actual.equals(expected);
}
}
| 1,172 | 32.514286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CommandExecutor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
import org.apache.hadoop.cli.CLITestHelper;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.PrintStream;
import java.util.StringTokenizer;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.ArrayList;
/**
 * This class executes commands and captures their output.
 */
public abstract class CommandExecutor {
protected String[] getCommandAsArgs(final String cmd, final String masterKey,
final String master) {
String regex = "\'([^\']*)\'|\"([^\"]*)\"|(\\S+)";
Matcher matcher = Pattern.compile(regex).matcher(cmd);
ArrayList<String> args = new ArrayList<String>();
String arg = null;
while (matcher.find()) {
if (matcher.group(1) != null) {
arg = matcher.group(1);
} else if (matcher.group(2) != null) {
arg = matcher.group(2);
} else {
arg = matcher.group(3);
}
arg = arg.replaceAll(masterKey, master);
arg = arg.replaceAll("CLITEST_DATA",
new File(CLITestHelper.TEST_CACHE_DATA_DIR).
toURI().toString().replace(' ', '+'));
arg = arg.replaceAll("USERNAME", System.getProperty("user.name"));
args.add(arg);
}
return args.toArray(new String[0]);
}
public Result executeCommand(final String cmd) throws Exception {
int exitCode = 0;
Exception lastException = null;
ByteArrayOutputStream bao = new ByteArrayOutputStream();
PrintStream origOut = System.out;
PrintStream origErr = System.err;
System.setOut(new PrintStream(bao));
System.setErr(new PrintStream(bao));
try {
execute(cmd);
} catch (Exception e) {
e.printStackTrace();
lastException = e;
exitCode = -1;
} finally {
System.setOut(origOut);
System.setErr(origErr);
}
return new Result(bao.toString(), exitCode, lastException, cmd);
}
protected abstract void execute(final String cmd) throws Exception;
public static class Result {
final String commandOutput;
final int exitCode;
final Exception exception;
final String cmdExecuted;
public Result(String commandOutput, int exitCode, Exception exception,
String cmdExecuted) {
this.commandOutput = commandOutput;
this.exitCode = exitCode;
this.exception = exception;
this.cmdExecuted = cmdExecuted;
}
public String getCommandOutput() {
return commandOutput;
}
public int getExitCode() {
return exitCode;
}
public Exception getException() {
return exception;
}
public String getCommand() {
return cmdExecuted;
}
}
}
| 3,536 | 27.756098 | 79 |
java
|
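The regex in getCommandAsArgs honors single- and double-quoted arguments and substitutes the master key as well as the CLITEST_DATA and USERNAME placeholders. Since the method is protected, a small hypothetical subclass makes the splitting visible; the URI is a placeholder.

package org.apache.hadoop.cli.util;

public class ArgSplitDemo extends CommandExecutor {
  @Override
  protected void execute(String cmd) {
    // Not needed for this demo; only the argument splitting is shown.
  }

  public static void main(String[] args) {
    String[] split = new ArgSplitDemo().getCommandAsArgs(
        "-cat 'NAMENODE/a file' NAMENODE/b", "NAMENODE", "hdfs://nn:8020");
    for (String s : split) {
      System.out.println(s);
    }
    // Prints: -cat, then hdfs://nn:8020/a file, then hdfs://nn:8020/b
  }
}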
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommandFS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.cli.util;
public class CLICommandFS implements CLICommandTypes {
}
| 907 | 40.272727 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tools/GetGroupsTestBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;
import static org.junit.Assert.assertEquals;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Before;
import org.junit.Test;
public abstract class GetGroupsTestBase {
protected Configuration conf;
private UserGroupInformation testUser1;
private UserGroupInformation testUser2;
protected abstract Tool getTool(PrintStream o);
@Before
public void setUpUsers() throws IOException {
// Make sure the current user's info is in the list of test users.
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
UserGroupInformation.createUserForTesting(currentUser.getUserName(), currentUser.getGroupNames());
testUser1 = UserGroupInformation.createUserForTesting("foo", new String[]{"bar", "baz"});
testUser2 = UserGroupInformation.createUserForTesting("fiz", new String[]{"buz", "boz"});
}
@Test
public void testNoUserGiven() throws Exception {
String actualOutput = runTool(conf, new String[0], true);
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
assertEquals("No user provided should default to current user",
getExpectedOutput(currentUser), actualOutput);
}
@Test
public void testExistingUser() throws Exception {
String actualOutput = runTool(conf, new String[]{testUser1.getUserName()}, true);
assertEquals("Show only the output of the user given",
getExpectedOutput(testUser1), actualOutput);
}
@Test
public void testMultipleExistingUsers() throws Exception {
String actualOutput = runTool(conf,
new String[]{testUser1.getUserName(), testUser2.getUserName()}, true);
assertEquals("Show the output for both users given",
getExpectedOutput(testUser1) + getExpectedOutput(testUser2), actualOutput);
}
@Test
public void testNonExistentUser() throws Exception {
String actualOutput = runTool(conf,
new String[]{"does-not-exist"}, true);
assertEquals("Show the output for only the user given, with no groups",
getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist")),
actualOutput);
}
@Test
public void testMultipleNonExistingUsers() throws Exception {
String actualOutput = runTool(conf,
new String[]{"does-not-exist1", "does-not-exist2"}, true);
assertEquals("Show the output for only the user given, with no groups",
getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist1")) +
getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist2")),
actualOutput);
}
@Test
public void testExistingInterleavedWithNonExistentUsers() throws Exception {
String actualOutput = runTool(conf,
new String[]{"does-not-exist1", testUser1.getUserName(),
"does-not-exist2", testUser2.getUserName()}, true);
assertEquals("Show the output for only the user given, with no groups",
getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist1")) +
getExpectedOutput(testUser1) +
getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist2")) +
getExpectedOutput(testUser2),
actualOutput);
}
private static String getExpectedOutput(UserGroupInformation user) {
String expectedOutput = user.getUserName() + " :";
for (String group : user.getGroupNames()) {
expectedOutput += " " + group;
}
return expectedOutput + System.getProperty("line.separator");
}
private String runTool(Configuration conf, String[] args, boolean success)
throws Exception {
ByteArrayOutputStream o = new ByteArrayOutputStream();
PrintStream out = new PrintStream(o, true);
try {
int ret = ToolRunner.run(getTool(out), args);
assertEquals(success, ret == 0);
return o.toString();
} finally {
o.close();
out.close();
}
}
}
| 4,974 | 37.867188 | 102 |
java
|
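The assertions above all hinge on the line format produced by getExpectedOutput, i.e. "user : group1 group2" followed by the platform line separator. A standalone sketch of that format; the user and group names are placeholders.

package org.apache.hadoop.tools;

import org.apache.hadoop.security.UserGroupInformation;

public class ExpectedOutputDemo {
  public static void main(String[] args) {
    // Mirrors getExpectedOutput above: "user : group1 group2".
    UserGroupInformation u = UserGroupInformation.createUserForTesting(
        "foo", new String[]{"bar", "baz"});
    StringBuilder line = new StringBuilder(u.getUserName()).append(" :");
    for (String g : u.getGroupNames()) {
      line.append(' ').append(g);
    }
    System.out.println(line); // foo : bar baz
  }
}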