repo | file | code | file_length | avg_line_length | max_line_length | extension_type
stringlengths 1–191 (⌀) | stringlengths 23–351 | stringlengths 0–5.32M | int64 0–5.32M | float64 0–2.9k | int64 0–288k | stringclasses 1 value
---|---|---|---|---|---|---
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.EnumSet;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.RandomDatum;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public abstract class CryptoStreamsTestBase {
protected static final Log LOG = LogFactory.getLog(
CryptoStreamsTestBase.class);
protected static CryptoCodec codec;
private static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16};
private static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
protected static final int count = 10000;
protected static int defaultBufferSize = 8192;
protected static int smallBufferSize = 1024;
private byte[] data;
private int dataLen;
@Before
public void setUp() throws IOException {
// Generate data
final int seed = new Random().nextInt();
final DataOutputBuffer dataBuf = new DataOutputBuffer();
final RandomDatum.Generator generator = new RandomDatum.Generator(seed);
for(int i = 0; i < count; ++i) {
generator.next();
final RandomDatum key = generator.getKey();
final RandomDatum value = generator.getValue();
key.write(dataBuf);
value.write(dataBuf);
}
LOG.info("Generated " + count + " records");
data = dataBuf.getData();
dataLen = dataBuf.getLength();
}
protected void writeData(OutputStream out) throws Exception {
out.write(data, 0, dataLen);
out.close();
}
protected int getDataLen() {
return dataLen;
}
/** Read until {@code len} bytes have been read or EOF is reached. */
private int readAll(InputStream in, byte[] b, int off, int len)
throws IOException {
int n = 0;
int total = 0;
while (n != -1) {
total += n;
if (total >= len) {
break;
}
n = in.read(b, off + total, len - total);
}
return total;
}
protected OutputStream getOutputStream(int bufferSize) throws IOException {
return getOutputStream(bufferSize, key, iv);
}
protected abstract OutputStream getOutputStream(int bufferSize, byte[] key,
byte[] iv) throws IOException;
protected InputStream getInputStream(int bufferSize) throws IOException {
return getInputStream(bufferSize, key, iv);
}
protected abstract InputStream getInputStream(int bufferSize, byte[] key,
byte[] iv) throws IOException;
/** Test crypto reading with different buffer sizes. */
@Test(timeout=120000)
public void testRead() throws Exception {
OutputStream out = getOutputStream(defaultBufferSize);
writeData(out);
// Default buffer size
InputStream in = getInputStream(defaultBufferSize);
readCheck(in);
in.close();
// Small buffer size
in = getInputStream(smallBufferSize);
readCheck(in);
in.close();
}
private void readCheck(InputStream in) throws Exception {
byte[] result = new byte[dataLen];
int n = readAll(in, result, 0, dataLen);
Assert.assertEquals(dataLen, n);
byte[] expectedData = new byte[n];
System.arraycopy(data, 0, expectedData, 0, n);
Assert.assertArrayEquals(result, expectedData);
// EOF
n = in.read(result, 0, dataLen);
Assert.assertEquals(n, -1);
in.close();
}
/** Test crypto writing with different buffer sizes. */
@Test(timeout = 120000)
public void testWrite() throws Exception {
// Default buffer size
writeCheck(defaultBufferSize);
// Small buffer size
writeCheck(smallBufferSize);
}
private void writeCheck(int bufferSize) throws Exception {
OutputStream out = getOutputStream(bufferSize);
writeData(out);
if (out instanceof FSDataOutputStream) {
Assert.assertEquals(((FSDataOutputStream) out).getPos(), getDataLen());
}
}
/** Test crypto with different IVs. */
@Test(timeout=120000)
public void testCryptoIV() throws Exception {
byte[] iv1 = iv.clone();
// Counter base: Long.MAX_VALUE
setCounterBaseForIV(iv1, Long.MAX_VALUE);
cryptoCheck(iv1);
// Counter base: Long.MAX_VALUE - 1
setCounterBaseForIV(iv1, Long.MAX_VALUE - 1);
cryptoCheck(iv1);
// Counter base: Integer.MAX_VALUE
setCounterBaseForIV(iv1, Integer.MAX_VALUE);
cryptoCheck(iv1);
// Counter base: 0
setCounterBaseForIV(iv1, 0);
cryptoCheck(iv1);
// Counter base: -1
setCounterBaseForIV(iv1, -1);
cryptoCheck(iv1);
}
private void cryptoCheck(byte[] iv) throws Exception {
OutputStream out = getOutputStream(defaultBufferSize, key, iv);
writeData(out);
InputStream in = getInputStream(defaultBufferSize, key, iv);
readCheck(in);
in.close();
}
private void setCounterBaseForIV(byte[] iv, long counterBase) {
ByteBuffer buf = ByteBuffer.wrap(iv);
buf.order(ByteOrder.BIG_ENDIAN);
buf.putLong(iv.length - 8, counterBase);
}
/**
* Test hflush/hsync of the crypto output stream, with different buffer sizes.
*/
@Test(timeout=120000)
public void testSyncable() throws IOException {
syncableCheck();
}
private void syncableCheck() throws IOException {
OutputStream out = getOutputStream(smallBufferSize);
try {
int bytesWritten = dataLen / 3;
out.write(data, 0, bytesWritten);
((Syncable) out).hflush();
InputStream in = getInputStream(defaultBufferSize);
verify(in, bytesWritten, data);
in.close();
out.write(data, bytesWritten, dataLen - bytesWritten);
((Syncable) out).hsync();
in = getInputStream(defaultBufferSize);
verify(in, dataLen, data);
in.close();
} finally {
out.close();
}
}
private void verify(InputStream in, int bytesToVerify,
byte[] expectedBytes) throws IOException {
final byte[] readBuf = new byte[bytesToVerify];
readAll(in, readBuf, 0, bytesToVerify);
for (int i = 0; i < bytesToVerify; i++) {
Assert.assertEquals(expectedBytes[i], readBuf[i]);
}
}
/** Positioned read until {@code len} bytes have been read or EOF is reached. */
private int readAll(InputStream in, long pos, byte[] b, int off, int len)
throws IOException {
int n = 0;
int total = 0;
while (n != -1) {
total += n;
if (total >= len) {
break;
}
n = ((PositionedReadable) in).read(pos + total, b, off + total,
len - total);
}
return total;
}
/** Test positioned read. */
@Test(timeout=120000)
public void testPositionedRead() throws Exception {
OutputStream out = getOutputStream(defaultBufferSize);
writeData(out);
InputStream in = getInputStream(defaultBufferSize);
// Pos: 1/3 dataLen
positionedReadCheck(in, dataLen / 3);
// Pos: 1/2 dataLen
positionedReadCheck(in, dataLen / 2);
in.close();
}
private void positionedReadCheck(InputStream in, int pos) throws Exception {
byte[] result = new byte[dataLen];
int n = readAll(in, pos, result, 0, dataLen);
Assert.assertEquals(dataLen, n + pos);
byte[] readData = new byte[n];
System.arraycopy(result, 0, readData, 0, n);
byte[] expectedData = new byte[n];
System.arraycopy(data, pos, expectedData, 0, n);
Assert.assertArrayEquals(readData, expectedData);
}
/** Test readFully. */
@Test(timeout=120000)
public void testReadFully() throws Exception {
OutputStream out = getOutputStream(defaultBufferSize);
writeData(out);
InputStream in = getInputStream(defaultBufferSize);
final int len1 = dataLen / 4;
// Read len1 bytes
byte[] readData = new byte[len1];
readAll(in, readData, 0, len1);
byte[] expectedData = new byte[len1];
System.arraycopy(data, 0, expectedData, 0, len1);
Assert.assertArrayEquals(readData, expectedData);
// Pos: 1/3 dataLen
readFullyCheck(in, dataLen / 3);
// Read len1 bytes
readData = new byte[len1];
readAll(in, readData, 0, len1);
expectedData = new byte[len1];
System.arraycopy(data, len1, expectedData, 0, len1);
Assert.assertArrayEquals(readData, expectedData);
// Pos: 1/2 dataLen
readFullyCheck(in, dataLen / 2);
// Read len1 bytes
readData = new byte[len1];
readAll(in, readData, 0, len1);
expectedData = new byte[len1];
System.arraycopy(data, 2 * len1, expectedData, 0, len1);
Assert.assertArrayEquals(readData, expectedData);
in.close();
}
private void readFullyCheck(InputStream in, int pos) throws Exception {
byte[] result = new byte[dataLen - pos];
((PositionedReadable) in).readFully(pos, result);
byte[] expectedData = new byte[dataLen - pos];
System.arraycopy(data, pos, expectedData, 0, dataLen - pos);
Assert.assertArrayEquals(result, expectedData);
result = new byte[dataLen]; // Requested length exceeds the remaining stream
try {
((PositionedReadable) in).readFully(pos, result);
Assert.fail("readFully exceeding the remaining stream length should fail.");
} catch (IOException e) {
// Expected.
}
}
/** Test seeking to different positions. */
@Test(timeout=120000)
public void testSeek() throws Exception {
OutputStream out = getOutputStream(defaultBufferSize);
writeData(out);
InputStream in = getInputStream(defaultBufferSize);
// Pos: 1/3 dataLen
seekCheck(in, dataLen / 3);
// Pos: 0
seekCheck(in, 0);
// Pos: 1/2 dataLen
seekCheck(in, dataLen / 2);
final long pos = ((Seekable) in).getPos();
// Pos: -3
try {
seekCheck(in, -3);
Assert.fail("Seek to negative offset should fail.");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Cannot seek to negative " +
"offset", e);
}
Assert.assertEquals(pos, ((Seekable) in).getPos());
// Pos: dataLen + 3
try {
seekCheck(in, dataLen + 3);
Assert.fail("Seek after EOF should fail.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Cannot seek after EOF", e);
}
Assert.assertEquals(pos, ((Seekable) in).getPos());
in.close();
}
private void seekCheck(InputStream in, int pos) throws Exception {
byte[] result = new byte[dataLen];
((Seekable) in).seek(pos);
int n = readAll(in, result, 0, dataLen);
Assert.assertEquals(dataLen, n + pos);
byte[] readData = new byte[n];
System.arraycopy(result, 0, readData, 0, n);
byte[] expectedData = new byte[n];
System.arraycopy(data, pos, expectedData, 0, n);
Assert.assertArrayEquals(readData, expectedData);
}
/** Test getPos(). */
@Test(timeout=120000)
public void testGetPos() throws Exception {
OutputStream out = getOutputStream(defaultBufferSize);
writeData(out);
// Default buffer size
InputStream in = getInputStream(defaultBufferSize);
byte[] result = new byte[dataLen];
int n1 = readAll(in, result, 0, dataLen / 3);
Assert.assertEquals(n1, ((Seekable) in).getPos());
int n2 = readAll(in, result, n1, dataLen - n1);
Assert.assertEquals(n1 + n2, ((Seekable) in).getPos());
in.close();
}
@Test(timeout=120000)
public void testAvailable() throws Exception {
OutputStream out = getOutputStream(defaultBufferSize);
writeData(out);
// Default buffer size
InputStream in = getInputStream(defaultBufferSize);
byte[] result = new byte[dataLen];
int n1 = readAll(in, result, 0, dataLen / 3);
Assert.assertEquals(in.available(), dataLen - n1);
int n2 = readAll(in, result, n1, dataLen - n1);
Assert.assertEquals(in.available(), dataLen - n1 - n2);
in.close();
}
/** Test skip. */
@Test(timeout=120000)
public void testSkip() throws Exception {
OutputStream out = getOutputStream(defaultBufferSize);
writeData(out);
// Default buffer size
InputStream in = getInputStream(defaultBufferSize);
byte[] result = new byte[dataLen];
int n1 = readAll(in, result, 0, dataLen / 3);
Assert.assertEquals(n1, ((Seekable) in).getPos());
long skipped = in.skip(dataLen / 3);
int n2 = readAll(in, result, 0, dataLen);
Assert.assertEquals(dataLen, n1 + skipped + n2);
byte[] readData = new byte[n2];
System.arraycopy(result, 0, readData, 0, n2);
byte[] expectedData = new byte[n2];
System.arraycopy(data, dataLen - n2, expectedData, 0, n2);
Assert.assertArrayEquals(readData, expectedData);
try {
skipped = in.skip(-3);
Assert.fail("Skip with negative length should fail.");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Negative skip length", e);
}
// Skip after EOF
skipped = in.skip(3);
Assert.assertEquals(skipped, 0);
in.close();
}
private void byteBufferReadCheck(InputStream in, ByteBuffer buf,
int bufPos) throws Exception {
buf.position(bufPos);
int n = ((ByteBufferReadable) in).read(buf);
Assert.assertEquals(bufPos + n, buf.position());
byte[] readData = new byte[n];
buf.rewind();
buf.position(bufPos);
buf.get(readData);
byte[] expectedData = new byte[n];
System.arraycopy(data, 0, expectedData, 0, n);
Assert.assertArrayEquals(readData, expectedData);
}
/** Test byte buffer read with different buffer sizes. */
@Test(timeout=120000)
public void testByteBufferRead() throws Exception {
OutputStream out = getOutputStream(defaultBufferSize);
writeData(out);
// Default buffer size, initial buffer position is 0
InputStream in = getInputStream(defaultBufferSize);
ByteBuffer buf = ByteBuffer.allocate(dataLen + 100);
byteBufferReadCheck(in, buf, 0);
in.close();
// Default buffer size, initial buffer position is not 0
in = getInputStream(defaultBufferSize);
buf.clear();
byteBufferReadCheck(in, buf, 11);
in.close();
// Small buffer size, initial buffer position is 0
in = getInputStream(smallBufferSize);
buf.clear();
byteBufferReadCheck(in, buf, 0);
in.close();
// Small buffer size, initial buffer position is not 0
in = getInputStream(smallBufferSize);
buf.clear();
byteBufferReadCheck(in, buf, 11);
in.close();
// Direct buffer, default buffer size, initial buffer position is 0
in = getInputStream(defaultBufferSize);
buf = ByteBuffer.allocateDirect(dataLen + 100);
byteBufferReadCheck(in, buf, 0);
in.close();
// Direct buffer, default buffer size, initial buffer position is not 0
in = getInputStream(defaultBufferSize);
buf.clear();
byteBufferReadCheck(in, buf, 11);
in.close();
// Direct buffer, small buffer size, initial buffer position is 0
in = getInputStream(smallBufferSize);
buf.clear();
byteBufferReadCheck(in, buf, 0);
in.close();
// Direct buffer, small buffer size, initial buffer position is not 0
in = getInputStream(smallBufferSize);
buf.clear();
byteBufferReadCheck(in, buf, 11);
in.close();
}
@Test(timeout=120000)
public void testCombinedOp() throws Exception {
OutputStream out = getOutputStream(defaultBufferSize);
writeData(out);
final int len1 = dataLen / 8;
final int len2 = dataLen / 10;
InputStream in = getInputStream(defaultBufferSize);
// Read len1 data.
byte[] readData = new byte[len1];
readAll(in, readData, 0, len1);
byte[] expectedData = new byte[len1];
System.arraycopy(data, 0, expectedData, 0, len1);
Assert.assertArrayEquals(readData, expectedData);
long pos = ((Seekable) in).getPos();
Assert.assertEquals(len1, pos);
// Seek forward len2
((Seekable) in).seek(pos + len2);
// Skip forward len2
long n = in.skip(len2);
Assert.assertEquals(len2, n);
// Pos: 1/4 dataLen
positionedReadCheck(in, dataLen / 4);
// Pos should be len1 + len2 + len2
pos = ((Seekable) in).getPos();
Assert.assertEquals(len1 + len2 + len2, pos);
// Read forward len1
ByteBuffer buf = ByteBuffer.allocate(len1);
int nRead = ((ByteBufferReadable) in).read(buf);
Assert.assertEquals(nRead, buf.position());
readData = new byte[nRead];
buf.rewind();
buf.get(readData);
expectedData = new byte[nRead];
System.arraycopy(data, (int)pos, expectedData, 0, nRead);
Assert.assertArrayEquals(readData, expectedData);
long lastPos = pos;
// Pos should be lastPos + nRead
pos = ((Seekable) in).getPos();
Assert.assertEquals(lastPos + nRead, pos);
// Pos: 1/3 dataLen
positionedReadCheck(in, dataLen / 3);
// Read forward len1
readData = new byte[len1];
readAll(in, readData, 0, len1);
expectedData = new byte[len1];
System.arraycopy(data, (int)pos, expectedData, 0, len1);
Assert.assertArrayEquals(readData, expectedData);
lastPos = pos;
// Pos should be lastPos + len1
pos = ((Seekable) in).getPos();
Assert.assertEquals(lastPos + len1, pos);
// Read forward len1
buf = ByteBuffer.allocate(len1);
nRead = ((ByteBufferReadable) in).read(buf);
Assert.assertEquals(nRead, buf.position());
readData = new byte[nRead];
buf.rewind();
buf.get(readData);
expectedData = new byte[nRead];
System.arraycopy(data, (int)pos, expectedData, 0, nRead);
Assert.assertArrayEquals(readData, expectedData);
lastPos = pos;
// Pos should be lastPos + nRead
pos = ((Seekable) in).getPos();
Assert.assertEquals(lastPos + nRead, pos);
// ByteBuffer read after EOF
((Seekable) in).seek(dataLen);
buf.clear();
n = ((ByteBufferReadable) in).read(buf);
Assert.assertEquals(n, -1);
in.close();
}
@Test(timeout=120000)
public void testSeekToNewSource() throws Exception {
OutputStream out = getOutputStream(defaultBufferSize);
writeData(out);
InputStream in = getInputStream(defaultBufferSize);
final int len1 = dataLen / 8;
byte[] readData = new byte[len1];
readAll(in, readData, 0, len1);
// Pos: 1/3 dataLen
seekToNewSourceCheck(in, dataLen / 3);
// Pos: 0
seekToNewSourceCheck(in, 0);
// Pos: 1/2 dataLen
seekToNewSourceCheck(in, dataLen / 2);
// Pos: -3
try {
seekToNewSourceCheck(in, -3);
Assert.fail("Seek to negative offset should fail.");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Cannot seek to negative " +
"offset", e);
}
// Pos: dataLen + 3
try {
seekToNewSourceCheck(in, dataLen + 3);
Assert.fail("Seek after EOF should fail.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Attempted to read past " +
"end of file", e);
}
in.close();
}
private void seekToNewSourceCheck(InputStream in, int targetPos)
throws Exception {
byte[] result = new byte[dataLen];
((Seekable) in).seekToNewSource(targetPos);
int n = readAll(in, result, 0, dataLen);
Assert.assertEquals(dataLen, n + targetPos);
byte[] readData = new byte[n];
System.arraycopy(result, 0, readData, 0, n);
byte[] expectedData = new byte[n];
System.arraycopy(data, targetPos, expectedData, 0, n);
Assert.assertArrayEquals(readData, expectedData);
}
private ByteBufferPool getBufferPool() {
return new ByteBufferPool() {
@Override
public ByteBuffer getBuffer(boolean direct, int length) {
return ByteBuffer.allocateDirect(length);
}
@Override
public void putBuffer(ByteBuffer buffer) {
}
};
}
@Test(timeout=120000)
public void testHasEnhancedByteBufferAccess() throws Exception {
OutputStream out = getOutputStream(defaultBufferSize);
writeData(out);
InputStream in = getInputStream(defaultBufferSize);
final int len1 = dataLen / 8;
// ByteBuffer size is len1
ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in).read(
getBufferPool(), len1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
int n1 = buffer.remaining();
byte[] readData = new byte[n1];
buffer.get(readData);
byte[] expectedData = new byte[n1];
System.arraycopy(data, 0, expectedData, 0, n1);
Assert.assertArrayEquals(readData, expectedData);
((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
// Read len1 bytes
readData = new byte[len1];
readAll(in, readData, 0, len1);
expectedData = new byte[len1];
System.arraycopy(data, n1, expectedData, 0, len1);
Assert.assertArrayEquals(readData, expectedData);
// ByteBuffer size is len1
buffer = ((HasEnhancedByteBufferAccess) in).read(
getBufferPool(), len1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
int n2 = buffer.remaining();
readData = new byte[n2];
buffer.get(readData);
expectedData = new byte[n2];
System.arraycopy(data, n1 + len1, expectedData, 0, n2);
Assert.assertArrayEquals(readData, expectedData);
((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
in.close();
}
}
| 22,677 | 29.980874 | 79 |
java
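The `setCounterBaseForIV` helper above places a big-endian 64-bit counter in the last eight bytes of the 16-byte AES-CTR IV, which is what the `testCryptoIV` cases exercise around overflow boundaries. A minimal standalone sketch of the same byte layout (the all-zero IV and the printed output are illustrative only):

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;

public class CounterBaseDemo {
  // Writes the counter base into the last 8 bytes of a 16-byte AES-CTR IV,
  // big-endian, exactly as setCounterBaseForIV does in the test above.
  static void setCounterBase(byte[] iv, long counterBase) {
    ByteBuffer buf = ByteBuffer.wrap(iv);
    buf.order(ByteOrder.BIG_ENDIAN);
    buf.putLong(iv.length - 8, counterBase);
  }

  public static void main(String[] args) {
    byte[] iv = new byte[16];            // all-zero IV for illustration
    setCounterBase(iv, Long.MAX_VALUE);  // counter base 0x7fffffffffffffff
    System.out.println(Arrays.toString(iv));
    // Prints: [0, 0, 0, 0, 0, 0, 0, 0, 127, -1, -1, -1, -1, -1, -1, -1]
  }
}
```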
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import java.io.EOFException;
import java.io.FileDescriptor;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ByteBufferReadable;
import org.apache.hadoop.fs.CanSetDropBehind;
import org.apache.hadoop.fs.CanSetReadahead;
import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
import org.apache.hadoop.fs.HasFileDescriptor;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.io.ByteBufferPool;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
public class TestCryptoStreams extends CryptoStreamsTestBase {
/**
* Data storage.
* {@link #getOutputStream(int)} will write to this buf.
* {@link #getInputStream(int)} will read from this buf.
*/
private byte[] buf;
private int bufLen;
@BeforeClass
public static void init() throws Exception {
Configuration conf = new Configuration();
codec = CryptoCodec.getInstance(conf);
}
@AfterClass
public static void shutdown() throws Exception {
}
@Override
protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv)
throws IOException {
DataOutputBuffer out = new DataOutputBuffer() {
@Override
public void flush() throws IOException {
buf = getData();
bufLen = getLength();
}
@Override
public void close() throws IOException {
buf = getData();
bufLen = getLength();
}
};
return new CryptoOutputStream(new FakeOutputStream(out),
codec, bufferSize, key, iv);
}
@Override
protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv)
throws IOException {
DataInputBuffer in = new DataInputBuffer();
in.reset(buf, 0, bufLen);
return new CryptoInputStream(new FakeInputStream(in), codec, bufferSize,
key, iv);
}
private class FakeOutputStream extends OutputStream
implements Syncable, CanSetDropBehind {
private final byte[] oneByteBuf = new byte[1];
private final DataOutputBuffer out;
private boolean closed;
public FakeOutputStream(DataOutputBuffer out) {
this.out = out;
}
@Override
public void write(byte b[], int off, int len) throws IOException {
if (b == null) {
throw new NullPointerException();
} else if (off < 0 || len < 0 || len > b.length - off) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return;
}
checkStream();
out.write(b, off, len);
}
@Override
public void flush() throws IOException {
checkStream();
out.flush();
}
@Override
public void close() throws IOException {
if (closed) {
return;
}
out.close();
closed = true;
}
@Override
public void write(int b) throws IOException {
oneByteBuf[0] = (byte)(b & 0xff);
write(oneByteBuf, 0, oneByteBuf.length);
}
@Override
public void setDropBehind(Boolean dropCache) throws IOException,
UnsupportedOperationException {
}
@Override
public void sync() throws IOException {
hflush();
}
@Override
public void hflush() throws IOException {
checkStream();
flush();
}
@Override
public void hsync() throws IOException {
checkStream();
flush();
}
private void checkStream() throws IOException {
if (closed) {
throw new IOException("Stream is closed!");
}
}
}
public static class FakeInputStream extends InputStream implements
Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor,
CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess {
private final byte[] oneByteBuf = new byte[1];
private int pos = 0;
private final byte[] data;
private final int length;
private boolean closed = false;
public FakeInputStream(DataInputBuffer in) {
data = in.getData();
length = in.getLength();
}
@Override
public void seek(long pos) throws IOException {
if (pos > length) {
throw new IOException("Cannot seek after EOF.");
}
if (pos < 0) {
throw new IOException("Cannot seek to negative offset.");
}
checkStream();
this.pos = (int)pos;
}
@Override
public long getPos() throws IOException {
return pos;
}
@Override
public int available() throws IOException {
return length - pos;
}
@Override
public int read(byte b[], int off, int len) throws IOException {
if (b == null) {
throw new NullPointerException();
} else if (off < 0 || len < 0 || len > b.length - off) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return 0;
}
checkStream();
if (pos < length) {
int n = (int) Math.min(len, length - pos);
System.arraycopy(data, pos, b, off, n);
pos += n;
return n;
}
return -1;
}
private void checkStream() throws IOException {
if (closed) {
throw new IOException("Stream is closed!");
}
}
@Override
public int read(ByteBuffer buf) throws IOException {
checkStream();
if (pos < length) {
int n = (int) Math.min(buf.remaining(), length - pos);
if (n > 0) {
buf.put(data, pos, n);
}
pos += n;
return n;
}
return -1;
}
@Override
public long skip(long n) throws IOException {
checkStream();
if (n > 0) {
if (n + pos > length) {
n = length - pos;
}
pos += n;
return n;
}
return n < 0 ? -1 : 0;
}
@Override
public void close() throws IOException {
closed = true;
}
@Override
public int read(long position, byte[] b, int off, int len)
throws IOException {
if (b == null) {
throw new NullPointerException();
} else if (off < 0 || len < 0 || len > b.length - off) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return 0;
}
if (position > length) {
throw new IOException("Cannot read after EOF.");
}
if (position < 0) {
throw new IOException("Cannot read to negative offset.");
}
checkStream();
if (position < length) {
int n = (int) Math.min(len, length - position);
System.arraycopy(data, (int)position, b, off, n);
return n;
}
return -1;
}
@Override
public void readFully(long position, byte[] b, int off, int len)
throws IOException {
if (b == null) {
throw new NullPointerException();
} else if (off < 0 || len < 0 || len > b.length - off) {
throw new IndexOutOfBoundsException();
} else if (len == 0) {
return;
}
if (position > length) {
throw new IOException("Cannot read after EOF.");
}
if (position < 0) {
throw new IOException("Cannot read to negative offset.");
}
checkStream();
if (position + len > length) {
throw new EOFException("Reached the end of the stream.");
}
System.arraycopy(data, (int)position, b, off, len);
}
@Override
public void readFully(long position, byte[] buffer) throws IOException {
readFully(position, buffer, 0, buffer.length);
}
@Override
public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
EnumSet<ReadOption> opts) throws IOException,
UnsupportedOperationException {
if (bufferPool == null) {
throw new IOException("Please specify buffer pool.");
}
ByteBuffer buffer = bufferPool.getBuffer(true, maxLength);
int pos = buffer.position();
int n = read(buffer);
if (n >= 0) {
buffer.position(pos);
return buffer;
}
return null;
}
@Override
public void releaseBuffer(ByteBuffer buffer) {
}
@Override
public void setReadahead(Long readahead) throws IOException,
UnsupportedOperationException {
}
@Override
public void setDropBehind(Boolean dropCache) throws IOException,
UnsupportedOperationException {
}
@Override
public FileDescriptor getFileDescriptor() throws IOException {
return null;
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
if (targetPos > length) {
throw new IOException("Attempted to read past end of file.");
}
if (targetPos < 0) {
throw new IOException("Cannot seek to negative offset.");
}
checkStream();
this.pos = (int)targetPos;
return false;
}
@Override
public int read() throws IOException {
int ret = read(oneByteBuf, 0, 1);
return (ret <= 0) ? -1 : (oneByteBuf[0] & 0xff);
}
}
}
| 10,256 | 25.850785 | 80 |
java
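TestCryptoStreams wires `CryptoOutputStream`/`CryptoInputStream` around in-memory fake streams. A self-contained sketch of the same encrypt-then-decrypt round trip over plain byte-array streams, assuming hadoop-common is on the classpath and using an all-zero key/IV purely for illustration:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.CryptoInputStream;
import org.apache.hadoop.crypto.CryptoOutputStream;

public class CryptoRoundTrip {
  public static void main(String[] args) throws Exception {
    byte[] key = new byte[16];  // all-zero key/IV for illustration only
    byte[] iv = new byte[16];
    CryptoCodec codec = CryptoCodec.getInstance(new Configuration());

    // Encrypt into an in-memory buffer, as the fake streams above do.
    ByteArrayOutputStream encrypted = new ByteArrayOutputStream();
    try (CryptoOutputStream out =
        new CryptoOutputStream(encrypted, codec, 8192, key, iv)) {
      out.write("hello crypto".getBytes("UTF-8"));
    }

    // Decrypt with the same codec, key, and IV.
    try (CryptoInputStream in = new CryptoInputStream(
        new ByteArrayInputStream(encrypted.toByteArray()),
        codec, 8192, key, iv)) {
      byte[] buf = new byte[64];
      int n = in.read(buf);
      System.out.println(new String(buf, 0, n, "UTF-8")); // hello crypto
    }
  }
}
```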
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import java.nio.ByteBuffer;
import java.security.NoSuchAlgorithmException;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.ShortBufferException;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assume;
import org.junit.Assert;
import org.junit.Test;
public class TestOpensslCipher {
private static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16};
private static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
@Test(timeout=120000)
public void testGetInstance() throws Exception {
Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
Assert.assertNotNull(cipher);
try {
cipher = OpensslCipher.getInstance("AES2/CTR/NoPadding");
Assert.fail("Should specify correct algorithm.");
} catch (NoSuchAlgorithmException e) {
// Expect NoSuchAlgorithmException
}
try {
cipher = OpensslCipher.getInstance("AES/CTR/NoPadding2");
Assert.fail("Should specify correct padding.");
} catch (NoSuchPaddingException e) {
// Expect NoSuchPaddingException
}
}
@Test(timeout=120000)
public void testUpdateArguments() throws Exception {
Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
Assert.assertNotNull(cipher);
cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);
// Require direct buffers
ByteBuffer input = ByteBuffer.allocate(1024);
ByteBuffer output = ByteBuffer.allocate(1024);
try {
cipher.update(input, output);
Assert.fail("Input and output buffers should be direct buffers.");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"Direct buffers are required", e);
}
// Output buffer length should be sufficient to store output data
input = ByteBuffer.allocateDirect(1024);
output = ByteBuffer.allocateDirect(1000);
try {
cipher.update(input, output);
Assert.fail("Output buffer length should be sufficient " +
"to store output data");
} catch (ShortBufferException e) {
GenericTestUtils.assertExceptionContains(
"Output buffer is not sufficient", e);
}
}
@Test(timeout=120000)
public void testDoFinalArguments() throws Exception {
Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
Assert.assertNotNull(cipher);
cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);
// Require direct buffer
ByteBuffer output = ByteBuffer.allocate(1024);
try {
cipher.doFinal(output);
Assert.fail("Output buffer should be a direct buffer.");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(
"Direct buffer is required", e);
}
}
}
| 4,000 | 35.045045 | 75 |
java
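TestOpensslCipher checks that `OpensslCipher.update` rejects heap buffers and short output buffers. `OpensslCipher` itself is Hadoop-internal; the sketch below shows the equivalent AES/CTR/NoPadding round trip through the standard JCE `javax.crypto.Cipher` API, which has no direct-buffer requirement (all-zero key/IV for illustration only):

```java
import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;

public class JceCtrDemo {
  public static void main(String[] args) throws Exception {
    byte[] key = new byte[16];  // illustrative all-zero key and IV
    byte[] iv = new byte[16];

    Cipher enc = Cipher.getInstance("AES/CTR/NoPadding");
    enc.init(Cipher.ENCRYPT_MODE,
        new SecretKeySpec(key, "AES"), new IvParameterSpec(iv));
    byte[] ct = enc.doFinal("plaintext".getBytes("UTF-8"));

    // CTR is symmetric: decrypting with the same key/IV restores the input.
    Cipher dec = Cipher.getInstance("AES/CTR/NoPadding");
    dec.init(Cipher.DECRYPT_MODE,
        new SecretKeySpec(key, "AES"), new IvParameterSpec(iv));
    System.out.println(new String(dec.doFinal(ct), "UTF-8")); // plaintext
  }
}
```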
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
/**
* Test crypto streams using a normal stream which does not support the
* additional interfaces that the Hadoop FileSystem streams implement
* (Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor,
* CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess,
* Syncable).
*/
public class TestCryptoStreamsNormal extends CryptoStreamsTestBase {
/**
* Data storage.
* {@link #getOutputStream(int, byte[], byte[])} will write to this buffer.
* {@link #getInputStream(int, byte[], byte[])} will read from this buffer.
*/
private byte[] buffer;
private int bufferLen;
@BeforeClass
public static void init() throws Exception {
Configuration conf = new Configuration();
codec = CryptoCodec.getInstance(conf);
}
@AfterClass
public static void shutdown() throws Exception {
}
@Override
protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv)
throws IOException {
OutputStream out = new ByteArrayOutputStream() {
@Override
public void flush() throws IOException {
buffer = buf;
bufferLen = count;
}
@Override
public void close() throws IOException {
buffer = buf;
bufferLen = count;
}
};
return new CryptoOutputStream(out, codec, bufferSize, key, iv);
}
@Override
protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv)
throws IOException {
ByteArrayInputStream in = new ByteArrayInputStream(buffer, 0, bufferLen);
return new CryptoInputStream(in, codec, bufferSize,
key, iv);
}
@Ignore("Wrapped stream doesn't support Syncable")
@Override
@Test(timeout=1000)
public void testSyncable() throws IOException {}
@Ignore("Wrapped stream doesn't support PositionedRead")
@Override
@Test(timeout=1000)
public void testPositionedRead() throws IOException {}
@Ignore("Wrapped stream doesn't support ReadFully")
@Override
@Test(timeout=1000)
public void testReadFully() throws IOException {}
@Ignore("Wrapped stream doesn't support Seek")
@Override
@Test(timeout=1000)
public void testSeek() throws IOException {}
@Ignore("Wrapped stream doesn't support ByteBufferRead")
@Override
@Test(timeout=1000)
public void testByteBufferRead() throws IOException {}
@Ignore("Wrapped stream doesn't support ByteBufferRead, Seek")
@Override
@Test(timeout=1000)
public void testCombinedOp() throws IOException {}
@Ignore("Wrapped stream doesn't support SeekToNewSource")
@Override
@Test(timeout=1000)
public void testSeekToNewSource() throws IOException {}
@Ignore("Wrapped stream doesn't support HasEnhancedByteBufferAccess")
@Override
@Test(timeout=1000)
public void testHasEnhancedByteBufferAccess() throws IOException {}
}
| 3,994 | 31.217742 | 79 |
java
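Every test in TestCryptoStreamsNormal is `@Ignore`d because the wrapped `ByteArrayInputStream`/`ByteArrayOutputStream` do not implement the optional Hadoop stream interfaces, so the corresponding `CryptoInputStream` operations cannot be delegated. A small sketch of the capability check a caller can make before casting (the `seekIfPossible` helper is hypothetical):

```java
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import org.apache.hadoop.fs.Seekable;

public class CapabilityCheck {
  // Hypothetical helper: seek only when the underlying stream supports it.
  static void seekIfPossible(InputStream in, long pos) throws Exception {
    if (in instanceof Seekable) {
      ((Seekable) in).seek(pos);
    } else {
      System.out.println("Stream does not support Seekable; skipping seek");
    }
  }

  public static void main(String[] args) throws Exception {
    // A plain ByteArrayInputStream is not Seekable, so this prints the notice.
    seekIfPossible(new ByteArrayInputStream(new byte[8]), 4);
  }
}
```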
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
public class TestCryptoStreamsForLocalFS extends CryptoStreamsTestBase {
private static final String TEST_ROOT_DIR
= System.getProperty("test.build.data", "build/test/data") + "/work-dir/localfs";
private final File base = new File(TEST_ROOT_DIR);
private final Path file = new Path(TEST_ROOT_DIR, "test-file");
private static LocalFileSystem fileSys;
@BeforeClass
public static void init() throws Exception {
Configuration conf = new Configuration(false);
conf.set("fs.file.impl", LocalFileSystem.class.getName());
fileSys = FileSystem.getLocal(conf);
codec = CryptoCodec.getInstance(conf);
}
@AfterClass
public static void shutdown() throws Exception {
}
@Before
@Override
public void setUp() throws IOException {
fileSys.delete(new Path(TEST_ROOT_DIR), true);
super.setUp();
}
@After
public void cleanUp() throws IOException {
FileUtil.setWritable(base, true);
FileUtil.fullyDelete(base);
assertTrue(!base.exists());
}
@Override
protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv)
throws IOException {
return new CryptoOutputStream(fileSys.create(file), codec, bufferSize,
key, iv);
}
@Override
protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv)
throws IOException {
return new CryptoInputStream(fileSys.open(file), codec, bufferSize,
key, iv);
}
@Ignore("ChecksumFSInputChecker doesn't support ByteBuffer read")
@Override
@Test(timeout=1000)
public void testByteBufferRead() throws Exception {}
@Ignore("ChecksumFSOutputSummer doesn't support Syncable")
@Override
@Test(timeout=1000)
public void testSyncable() throws IOException {}
@Ignore("ChecksumFSInputChecker doesn't support ByteBuffer read")
@Override
@Test(timeout=1000)
public void testCombinedOp() throws Exception {}
@Ignore("ChecksumFSInputChecker doesn't support enhanced ByteBuffer access")
@Override
@Test(timeout=1000)
public void testHasEnhancedByteBufferAccess() throws Exception {
}
@Ignore("ChecksumFSInputChecker doesn't support seekToNewSource")
@Override
@Test(timeout=1000)
public void testSeekToNewSource() throws Exception {
}
}
| 3,674 | 30.956522 | 84 |
java
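TestCryptoStreamsForLocalFS layers the crypto streams over `LocalFileSystem`. A condensed sketch of that layering outside the test harness, assuming hadoop-common on the classpath; the path and all-zero key/IV are illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.CryptoInputStream;
import org.apache.hadoop.crypto.CryptoOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class LocalFsCryptoDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    LocalFileSystem fs = FileSystem.getLocal(conf);
    CryptoCodec codec = CryptoCodec.getInstance(conf);
    byte[] key = new byte[16];                 // illustrative key/IV
    byte[] iv = new byte[16];
    Path file = new Path("/tmp/crypto-demo");  // hypothetical test path

    // Encrypt while writing to the local filesystem...
    try (CryptoOutputStream out = new CryptoOutputStream(
        fs.create(file), codec, 8192, key, iv)) {
      out.write("on-disk ciphertext".getBytes("UTF-8"));
    }
    // ...and decrypt transparently while reading it back.
    try (CryptoInputStream in = new CryptoInputStream(
        fs.open(file), codec, 8192, key, iv)) {
      byte[] buf = new byte[64];
      int n = in.read(buf);
      System.out.println(new String(buf, 0, n, "UTF-8"));
    }
  }
}
```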
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.math.BigInteger;
import java.security.GeneralSecurityException;
import java.security.SecureRandom;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.RandomDatum;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import com.google.common.primitives.Longs;
public class TestCryptoCodec {
private static final Log LOG = LogFactory.getLog(TestCryptoCodec.class);
private static byte[] key = new byte[16];
private static byte[] iv = new byte[16];
private static final int bufferSize = 4096;
private Configuration conf = new Configuration();
private int count = 10000;
private int seed = new Random().nextInt();
private final String jceCodecClass =
"org.apache.hadoop.crypto.JceAesCtrCryptoCodec";
private final String opensslCodecClass =
"org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec";
@Before
public void setUp() throws IOException {
Random random = new SecureRandom();
random.nextBytes(key);
random.nextBytes(iv);
}
@Test(timeout=120000)
public void testJceAesCtrCryptoCodec() throws Exception {
GenericTestUtils.assumeInNativeProfile();
if (!NativeCodeLoader.buildSupportsOpenssl()) {
LOG.warn("Skipping test since openSSL library not loaded");
Assume.assumeTrue(false);
}
Assert.assertEquals(null, OpensslCipher.getLoadingFailureReason());
cryptoCodecTest(conf, seed, 0, jceCodecClass, jceCodecClass, iv);
cryptoCodecTest(conf, seed, count, jceCodecClass, jceCodecClass, iv);
cryptoCodecTest(conf, seed, count, jceCodecClass, opensslCodecClass, iv);
// Overflow test, IV: xx xx xx xx xx xx xx xx ff ff ff ff ff ff ff ff
for(int i = 0; i < 8; i++) {
iv[8 + i] = (byte) 0xff;
}
cryptoCodecTest(conf, seed, count, jceCodecClass, jceCodecClass, iv);
cryptoCodecTest(conf, seed, count, jceCodecClass, opensslCodecClass, iv);
}
@Test(timeout=120000)
public void testOpensslAesCtrCryptoCodec() throws Exception {
GenericTestUtils.assumeInNativeProfile();
if (!NativeCodeLoader.buildSupportsOpenssl()) {
LOG.warn("Skipping test since openSSL library not loaded");
Assume.assumeTrue(false);
}
Assert.assertEquals(null, OpensslCipher.getLoadingFailureReason());
cryptoCodecTest(conf, seed, 0, opensslCodecClass, opensslCodecClass, iv);
cryptoCodecTest(conf, seed, count, opensslCodecClass, opensslCodecClass, iv);
cryptoCodecTest(conf, seed, count, opensslCodecClass, jceCodecClass, iv);
// Overflow test, IV: xx xx xx xx xx xx xx xx ff ff ff ff ff ff ff ff
for(int i = 0; i < 8; i++) {
iv[8 + i] = (byte) 0xff;
}
cryptoCodecTest(conf, seed, count, opensslCodecClass, opensslCodecClass, iv);
cryptoCodecTest(conf, seed, count, opensslCodecClass, jceCodecClass, iv);
}
private void cryptoCodecTest(Configuration conf, int seed, int count,
String encCodecClass, String decCodecClass, byte[] iv) throws IOException,
GeneralSecurityException {
CryptoCodec encCodec = null;
try {
encCodec = (CryptoCodec)ReflectionUtils.newInstance(
conf.getClassByName(encCodecClass), conf);
} catch (ClassNotFoundException cnfe) {
throw new IOException("Illegal crypto codec!");
}
LOG.info("Created a Codec object of type: " + encCodecClass);
// Generate data
DataOutputBuffer data = new DataOutputBuffer();
RandomDatum.Generator generator = new RandomDatum.Generator(seed);
for(int i = 0; i < count; ++i) {
generator.next();
RandomDatum key = generator.getKey();
RandomDatum value = generator.getValue();
key.write(data);
value.write(data);
}
LOG.info("Generated " + count + " records");
// Encrypt data
DataOutputBuffer encryptedDataBuffer = new DataOutputBuffer();
CryptoOutputStream out = new CryptoOutputStream(encryptedDataBuffer,
encCodec, bufferSize, key, iv);
out.write(data.getData(), 0, data.getLength());
out.flush();
out.close();
LOG.info("Finished encrypting data");
CryptoCodec decCodec = null;
try {
decCodec = (CryptoCodec)ReflectionUtils.newInstance(
conf.getClassByName(decCodecClass), conf);
} catch (ClassNotFoundException cnfe) {
throw new IOException("Illegal crypto codec!");
}
LOG.info("Created a Codec object of type: " + decCodecClass);
// Decrypt data
DataInputBuffer decryptedDataBuffer = new DataInputBuffer();
decryptedDataBuffer.reset(encryptedDataBuffer.getData(), 0,
encryptedDataBuffer.getLength());
CryptoInputStream in = new CryptoInputStream(decryptedDataBuffer,
decCodec, bufferSize, key, iv);
DataInputStream dataIn = new DataInputStream(new BufferedInputStream(in));
// Check
DataInputBuffer originalData = new DataInputBuffer();
originalData.reset(data.getData(), 0, data.getLength());
DataInputStream originalIn = new DataInputStream(
new BufferedInputStream(originalData));
for(int i=0; i < count; ++i) {
RandomDatum k1 = new RandomDatum();
RandomDatum v1 = new RandomDatum();
k1.readFields(originalIn);
v1.readFields(originalIn);
RandomDatum k2 = new RandomDatum();
RandomDatum v2 = new RandomDatum();
k2.readFields(dataIn);
v2.readFields(dataIn);
assertTrue("original and encrypted-then-decrypted-output not equal",
k1.equals(k2) && v1.equals(v2));
// original and encrypted-then-decrypted-output have the same hashCode
Map<RandomDatum, String> m = new HashMap<RandomDatum, String>();
m.put(k1, k1.toString());
m.put(v1, v1.toString());
String result = m.get(k2);
assertEquals("k1 and k2 hashcode not equal", result, k1.toString());
result = m.get(v2);
assertEquals("v1 and v2 hashcode not equal", result, v1.toString());
}
// Decrypt data byte-at-a-time
originalData.reset(data.getData(), 0, data.getLength());
decryptedDataBuffer.reset(encryptedDataBuffer.getData(), 0,
encryptedDataBuffer.getLength());
in = new CryptoInputStream(decryptedDataBuffer,
decCodec, bufferSize, key, iv);
// Check
originalIn = new DataInputStream(new BufferedInputStream(originalData));
int expected;
do {
expected = originalIn.read();
assertEquals("Decrypted stream read by byte does not match",
expected, in.read());
} while (expected != -1);
// Seek to a certain position and decrypt
originalData.reset(data.getData(), 0, data.getLength());
decryptedDataBuffer.reset(encryptedDataBuffer.getData(), 0,
encryptedDataBuffer.getLength());
in = new CryptoInputStream(new TestCryptoStreams.FakeInputStream(
decryptedDataBuffer), decCodec, bufferSize, key, iv);
int seekPos = data.getLength() / 3;
in.seek(seekPos);
// Check
TestCryptoStreams.FakeInputStream originalInput =
new TestCryptoStreams.FakeInputStream(originalData);
originalInput.seek(seekPos);
do {
expected = originalInput.read();
assertEquals("Decrypted stream read by byte does not match",
expected, in.read());
} while (expected != -1);
LOG.info("SUCCESS! Completed checking " + count + " records");
// Check secure random generator
testSecureRandom(encCodec);
}
/** Test the secure random generator. */
private void testSecureRandom(CryptoCodec codec) {
// len = 16
checkSecureRandom(codec, 16);
// len = 32
checkSecureRandom(codec, 32);
// len = 128
checkSecureRandom(codec, 128);
}
private void checkSecureRandom(CryptoCodec codec, int len) {
byte[] rand = new byte[len];
byte[] rand1 = new byte[len];
codec.generateSecureRandom(rand);
codec.generateSecureRandom(rand1);
Assert.assertEquals(len, rand.length);
Assert.assertEquals(len, rand1.length);
Assert.assertFalse(Arrays.equals(rand, rand1));
}
/**
* Regression test for IV calculation, see HADOOP-11343
*/
@Test(timeout=120000)
public void testCalculateIV() throws Exception {
JceAesCtrCryptoCodec codec = new JceAesCtrCryptoCodec();
codec.setConf(conf);
SecureRandom sr = new SecureRandom();
byte[] initIV = new byte[16];
byte[] IV = new byte[16];
long iterations = 1000;
long counter = 10000;
// Overflow test, IV: 00 00 00 00 00 00 00 00 ff ff ff ff ff ff ff ff
for(int i = 0; i < 8; i++) {
initIV[8 + i] = (byte)0xff;
}
for(long j = 0; j < counter; j++) {
assertIVCalculation(codec, initIV, j, IV);
}
// Random IV and counter sequence test
for(long i = 0; i < iterations; i++) {
sr.nextBytes(initIV);
for(long j = 0; j < counter; j++) {
assertIVCalculation(codec, initIV, j, IV);
}
}
// Random IV and random counter test
for(long i = 0; i < iterations; i++) {
sr.nextBytes(initIV);
for(long j = 0; j < counter; j++) {
long c = sr.nextLong();
assertIVCalculation(codec, initIV, c, IV);
}
}
}
private void assertIVCalculation(CryptoCodec codec, byte[] initIV,
long counter, byte[] IV) {
codec.calculateIV(initIV, counter, IV);
BigInteger iv = new BigInteger(1, IV);
BigInteger ref = calculateRef(initIV, counter);
assertTrue("Calculated IV doesn't match the reference", iv.equals(ref));
}
private static BigInteger calculateRef(byte[] initIV, long counter) {
byte[] cb = Longs.toByteArray(counter);
BigInteger bi = new BigInteger(1, initIV);
return bi.add(new BigInteger(1, cb));
}
}
| 11,160 | 34.88746 | 81 |
java
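`calculateRef` above defines the expected CTR IV as the unsigned big-endian sum of the initial IV and the block counter (HADOOP-11343 fixed IV calculation to agree with this on carry/overflow). A standalone sketch of that reference computation, truncating the sum back to 16 bytes; the example IV is illustrative:

```java
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.Arrays;

public class IvReference {
  // Reference CTR IV calculation: treat the 16-byte IV and the 8-byte
  // counter as unsigned big-endian integers, add them, and keep the low
  // 128 bits of the result.
  static byte[] calculateIV(byte[] initIV, long counter) {
    byte[] cb = ByteBuffer.allocate(8).putLong(counter).array();
    BigInteger sum = new BigInteger(1, initIV).add(new BigInteger(1, cb));
    byte[] raw = sum.toByteArray();
    byte[] iv = new byte[16];
    // BigInteger may emit extra leading bytes; copy only the low 16.
    int n = Math.min(raw.length, 16);
    System.arraycopy(raw, raw.length - n, iv, 16 - n, n);
    return iv;
  }

  public static void main(String[] args) {
    byte[] initIV = new byte[16];
    initIV[15] = (byte) 0xff;  // ...00ff, so adding 1 carries into byte 14
    System.out.println(Arrays.toString(calculateIV(initIV, 1)));
    // Prints: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]
  }
}
```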
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithJceAesCtrCryptoCodec.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.junit.Assert;
import org.junit.BeforeClass;
public class TestCryptoStreamsWithJceAesCtrCryptoCodec extends
TestCryptoStreams {
@BeforeClass
public static void init() throws Exception {
Configuration conf = new Configuration();
conf.set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY,
JceAesCtrCryptoCodec.class.getName());
codec = CryptoCodec.getInstance(conf);
Assert.assertEquals(JceAesCtrCryptoCodec.class.getCanonicalName(),
codec.getClass().getCanonicalName());
}
}
| 1,524 | 38.102564 | 97 |
java
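The subclass above selects the JCE codec through the `CommonConfigurationKeysPublic` constant. A sketch using the raw property name instead; the string `hadoop.security.crypto.codec.classes.aes.ctr.nopadding` is an assumption based on the key's naming scheme, so prefer the constant in real code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoCodec;

public class CodecSelectionDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Select the pure-Java codec. The property name below is assumed to
    // match the CommonConfigurationKeysPublic constant used in the test.
    conf.set("hadoop.security.crypto.codec.classes.aes.ctr.nopadding",
        "org.apache.hadoop.crypto.JceAesCtrCryptoCodec");
    CryptoCodec codec = CryptoCodec.getInstance(conf);
    System.out.println(codec.getClass().getName());
  }
}
```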
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.BeforeClass;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
public class TestCryptoStreamsWithOpensslAesCtrCryptoCodec
extends TestCryptoStreams {
@BeforeClass
public static void init() throws Exception {
GenericTestUtils.assumeInNativeProfile();
Configuration conf = new Configuration();
conf.set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY,
OpensslAesCtrCryptoCodec.class.getName());
codec = CryptoCodec.getInstance(conf);
assertNotNull("Unable to instantiate codec " +
OpensslAesCtrCryptoCodec.class.getName() + ", is the required "
+ "version of OpenSSL installed?", codec);
assertEquals(OpensslAesCtrCryptoCodec.class.getCanonicalName(),
codec.getClass().getCanonicalName());
}
}
| 1,866 | 39.586957 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOpensslSecureRandom.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.random;
import java.util.Arrays;
import org.junit.Test;
public class TestOpensslSecureRandom {
@Test(timeout=120000)
public void testRandomBytes() throws Exception {
OpensslSecureRandom random = new OpensslSecureRandom();
// len = 16
checkRandomBytes(random, 16);
// len = 32
checkRandomBytes(random, 32);
// len = 128
checkRandomBytes(random, 128);
// len = 256
checkRandomBytes(random, 256);
}
/**
* Test will timeout if secure random implementation always returns a
* constant value.
*/
private void checkRandomBytes(OpensslSecureRandom random, int len) {
byte[] bytes = new byte[len];
byte[] bytes1 = new byte[len];
random.nextBytes(bytes);
random.nextBytes(bytes1);
while (Arrays.equals(bytes, bytes1)) {
random.nextBytes(bytes1);
}
}
/**
* Test will timeout if secure random implementation always returns a
* constant value.
*/
@Test(timeout=120000)
public void testRandomInt() throws Exception {
OpensslSecureRandom random = new OpensslSecureRandom();
int rand1 = random.nextInt();
int rand2 = random.nextInt();
while (rand1 == rand2) {
rand2 = random.nextInt();
}
}
/**
* Test will timeout if secure random implementation always returns a
* constant value.
*/
@Test(timeout=120000)
public void testRandomLong() throws Exception {
OpensslSecureRandom random = new OpensslSecureRandom();
long rand1 = random.nextLong();
long rand2 = random.nextLong();
while (rand1 == rand2) {
rand2 = random.nextLong();
}
}
/**
* Test will timeout if secure random implementation always returns a
* constant value.
*/
@Test(timeout=120000)
public void testRandomFloat() throws Exception {
OpensslSecureRandom random = new OpensslSecureRandom();
float rand1 = random.nextFloat();
float rand2 = random.nextFloat();
while (rand1 == rand2) {
rand2 = random.nextFloat();
}
}
/**
* Test will timeout if secure random implementation always returns a
* constant value.
*/
@Test(timeout=120000)
public void testRandomDouble() throws Exception {
OpensslSecureRandom random = new OpensslSecureRandom();
double rand1 = random.nextDouble();
double rand2 = random.nextDouble();
while (rand1 == rand2) {
rand2 = random.nextDouble();
}
}
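
  /**
   * Illustrative sketch (not part of the original suite): a coarse sanity
   * check that a single 256-byte draw is not a constant fill. Follows the
   * timeout-loop idiom of the tests above; the method name is hypothetical.
   */
  @Test(timeout=120000)
  public void testBytesNotConstantSketch() throws Exception {
    OpensslSecureRandom random = new OpensslSecureRandom();
    byte[] bytes = new byte[256];
    random.nextBytes(bytes);
    byte[] constantFill = new byte[256];
    Arrays.fill(constantFill, bytes[0]);
    // A real RNG emitting 256 identical bytes is astronomically unlikely.
    while (Arrays.equals(bytes, constantFill)) {
      random.nextBytes(bytes);
      Arrays.fill(constantFill, bytes[0]);
    }
  }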
}
| 3,268 | 27.675439 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.random;
import java.io.IOException;
import java.util.Arrays;
import org.apache.commons.lang.SystemUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.junit.Assume;
import org.junit.Test;
public class TestOsSecureRandom {
private static OsSecureRandom getOsSecureRandom() throws IOException {
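    // OsSecureRandom reads from a random device file (/dev/urandom by
    // default), which is only guaranteed to be present on Linux.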
Assume.assumeTrue(SystemUtils.IS_OS_LINUX);
OsSecureRandom random = new OsSecureRandom();
random.setConf(new Configuration());
return random;
}
@Test(timeout=120000)
public void testRandomBytes() throws Exception {
OsSecureRandom random = getOsSecureRandom();
// len = 16
checkRandomBytes(random, 16);
// len = 32
checkRandomBytes(random, 32);
// len = 128
checkRandomBytes(random, 128);
// len = 256
checkRandomBytes(random, 256);
random.close();
}
/**
* Test will timeout if secure random implementation always returns a
* constant value.
*/
private void checkRandomBytes(OsSecureRandom random, int len) {
byte[] bytes = new byte[len];
byte[] bytes1 = new byte[len];
random.nextBytes(bytes);
random.nextBytes(bytes1);
while (Arrays.equals(bytes, bytes1)) {
random.nextBytes(bytes1);
}
}
/**
* Test will timeout if secure random implementation always returns a
* constant value.
*/
@Test(timeout=120000)
public void testRandomInt() throws Exception {
OsSecureRandom random = getOsSecureRandom();
int rand1 = random.nextInt();
int rand2 = random.nextInt();
while (rand1 == rand2) {
rand2 = random.nextInt();
}
random.close();
}
/**
* Test will timeout if secure random implementation always returns a
* constant value.
*/
@Test(timeout=120000)
public void testRandomLong() throws Exception {
OsSecureRandom random = getOsSecureRandom();
long rand1 = random.nextLong();
long rand2 = random.nextLong();
while (rand1 == rand2) {
rand2 = random.nextLong();
}
random.close();
}
/**
* Test will timeout if secure random implementation always returns a
* constant value.
*/
@Test(timeout=120000)
public void testRandomFloat() throws Exception {
OsSecureRandom random = getOsSecureRandom();
float rand1 = random.nextFloat();
float rand2 = random.nextFloat();
while (rand1 == rand2) {
rand2 = random.nextFloat();
}
random.close();
}
/**
* Test will timeout if secure random implementation always returns a
* constant value.
*/
@Test(timeout=120000)
public void testRandomDouble() throws Exception {
OsSecureRandom random = getOsSecureRandom();
double rand1 = random.nextDouble();
double rand2 = random.nextDouble();
while (rand1 == rand2) {
rand2 = random.nextDouble();
}
random.close();
}
@Test(timeout=120000)
public void testRefillReservoir() throws Exception {
OsSecureRandom random = getOsSecureRandom();
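    // Drawing 8196 longs (~64 KB) far exceeds what the internal random-byte
    // reservoir holds (assumed to be a few KB by default), so this exercises
    // the refill path repeatedly rather than a single buffered read.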
for (int i = 0; i < 8196; i++) {
random.nextLong();
}
random.close();
}
}
| 3,955 | 27.056738 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestCachingKeyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.util.Date;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
public class TestCachingKeyProvider {
@Test
public void testCurrentKey() throws Exception {
KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
KeyProvider mockProv = Mockito.mock(KeyProvider.class);
Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
Mockito.when(mockProv.getCurrentKey(Mockito.eq("k2"))).thenReturn(null);
Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
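    // The two trailing constructor arguments are assumed to be cache
    // timeouts in milliseconds (the sleeps below suggest 100 ms expiry
    // for both the current-key and key-version/metadata caches).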
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
// asserting caching
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
Thread.sleep(1200);
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
// asserting no caching when key is not known
cache = new CachingKeyProvider(mockProv, 100, 100);
Assert.assertEquals(null, cache.getCurrentKey("k2"));
Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k2"));
Assert.assertEquals(null, cache.getCurrentKey("k2"));
Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k2"));
}
@Test
public void testKeyVersion() throws Exception {
KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
KeyProvider mockProv = Mockito.mock(KeyProvider.class);
Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0")))
.thenReturn(mockKey);
Mockito.when(mockProv.getKeyVersion(Mockito.eq("k2@0"))).thenReturn(null);
Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
// asserting caching
Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
Mockito.verify(mockProv, Mockito.times(1))
.getKeyVersion(Mockito.eq("k1@0"));
Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
Mockito.verify(mockProv, Mockito.times(1))
.getKeyVersion(Mockito.eq("k1@0"));
Thread.sleep(200);
Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
Mockito.verify(mockProv, Mockito.times(2))
.getKeyVersion(Mockito.eq("k1@0"));
// asserting no caching when key is not known
cache = new CachingKeyProvider(mockProv, 100, 100);
Assert.assertEquals(null, cache.getKeyVersion("k2@0"));
Mockito.verify(mockProv, Mockito.times(1))
.getKeyVersion(Mockito.eq("k2@0"));
Assert.assertEquals(null, cache.getKeyVersion("k2@0"));
Mockito.verify(mockProv, Mockito.times(2))
.getKeyVersion(Mockito.eq("k2@0"));
}
@Test
public void testMetadata() throws Exception {
KeyProvider.Metadata mockMeta = Mockito.mock(KeyProvider.Metadata.class);
KeyProvider mockProv = Mockito.mock(KeyProvider.class);
Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(mockMeta);
Mockito.when(mockProv.getMetadata(Mockito.eq("k2"))).thenReturn(null);
Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
// asserting caching
Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k1"));
Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k1"));
Thread.sleep(200);
Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
Mockito.verify(mockProv, Mockito.times(2)).getMetadata(Mockito.eq("k1"));
// asserting no caching when key is not known
cache = new CachingKeyProvider(mockProv, 100, 100);
Assert.assertEquals(null, cache.getMetadata("k2"));
Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k2"));
Assert.assertEquals(null, cache.getMetadata("k2"));
Mockito.verify(mockProv, Mockito.times(2)).getMetadata(Mockito.eq("k2"));
}
@Test
public void testRollNewVersion() throws Exception {
KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
KeyProvider mockProv = Mockito.mock(KeyProvider.class);
Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
cache.rollNewVersion("k1");
// asserting the cache is purged
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
cache.rollNewVersion("k1", new byte[0]);
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
Mockito.verify(mockProv, Mockito.times(3)).getCurrentKey(Mockito.eq("k1"));
}
@Test
public void testDeleteKey() throws Exception {
KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
KeyProvider mockProv = Mockito.mock(KeyProvider.class);
Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0")))
.thenReturn(mockKey);
Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(
new KMSClientProvider.KMSMetadata("c", 0, "l", null, new Date(), 1));
Mockito.when(mockProv.getConf()).thenReturn(new Configuration());
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
Mockito.verify(mockProv, Mockito.times(1))
.getKeyVersion(Mockito.eq("k1@0"));
cache.deleteKey("k1");
// asserting the cache is purged
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
Mockito.verify(mockProv, Mockito.times(2))
.getKeyVersion(Mockito.eq("k1@0"));
}
}
| 7,485 | 46.081761 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.net.URI;
import java.security.SecureRandom;
import java.util.Arrays;
import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
public class TestKeyProviderCryptoExtension {
private static final String CIPHER = "AES";
private static final String ENCRYPTION_KEY_NAME = "fooKey";
private static Configuration conf;
private static KeyProvider kp;
private static KeyProviderCryptoExtension kpExt;
private static KeyProvider.Options options;
private static KeyVersion encryptionKey;
@BeforeClass
public static void setup() throws Exception {
conf = new Configuration();
kp = new UserProvider.Factory().createProvider(new URI("user:///"), conf);
kpExt = KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
options = new KeyProvider.Options(conf);
options.setCipher(CIPHER);
options.setBitLength(128);
encryptionKey =
kp.createKey(ENCRYPTION_KEY_NAME, SecureRandom.getSeed(16), options);
}
@Test
public void testGenerateEncryptedKey() throws Exception {
// Generate a new EEK and check it
KeyProviderCryptoExtension.EncryptedKeyVersion ek1 =
kpExt.generateEncryptedKey(encryptionKey.getName());
assertEquals("Version name of EEK should be EEK",
KeyProviderCryptoExtension.EEK,
ek1.getEncryptedKeyVersion().getVersionName());
assertEquals("Name of EEK should be encryption key name",
ENCRYPTION_KEY_NAME, ek1.getEncryptionKeyName());
assertNotNull("Expected encrypted key material",
ek1.getEncryptedKeyVersion().getMaterial());
assertEquals("Length of encryption key material and EEK material should "
+ "be the same", encryptionKey.getMaterial().length,
ek1.getEncryptedKeyVersion().getMaterial().length
);
// Decrypt EEK into an EK and check it
KeyVersion k1 = kpExt.decryptEncryptedKey(ek1);
assertEquals(KeyProviderCryptoExtension.EK, k1.getVersionName());
assertEquals(encryptionKey.getMaterial().length, k1.getMaterial().length);
    if (Arrays.equals(k1.getMaterial(), encryptionKey.getMaterial())) {
      fail("Decrypted key material should not equal encryption key material");
    }
    if (Arrays.equals(ek1.getEncryptedKeyVersion().getMaterial(),
        encryptionKey.getMaterial())) {
      fail("Encrypted key material should not equal encryption key material");
    }
// Decrypt it again and it should be the same
KeyVersion k1a = kpExt.decryptEncryptedKey(ek1);
assertArrayEquals(k1.getMaterial(), k1a.getMaterial());
// Generate another EEK and make sure it's different from the first
KeyProviderCryptoExtension.EncryptedKeyVersion ek2 =
kpExt.generateEncryptedKey(encryptionKey.getName());
KeyVersion k2 = kpExt.decryptEncryptedKey(ek2);
if (Arrays.equals(k1.getMaterial(), k2.getMaterial())) {
fail("Generated EEKs should have different material!");
}
if (Arrays.equals(ek1.getEncryptedKeyIv(), ek2.getEncryptedKeyIv())) {
fail("Generated EEKs should have different IVs!");
}
}
@Test
public void testEncryptDecrypt() throws Exception {
// Get an EEK
KeyProviderCryptoExtension.EncryptedKeyVersion eek =
kpExt.generateEncryptedKey(encryptionKey.getName());
final byte[] encryptedKeyIv = eek.getEncryptedKeyIv();
final byte[] encryptedKeyMaterial = eek.getEncryptedKeyVersion()
.getMaterial();
// Decrypt it manually
Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
cipher.init(Cipher.DECRYPT_MODE,
new SecretKeySpec(encryptionKey.getMaterial(), "AES"),
new IvParameterSpec(KeyProviderCryptoExtension.EncryptedKeyVersion
.deriveIV(encryptedKeyIv)));
final byte[] manualMaterial = cipher.doFinal(encryptedKeyMaterial);
// Test the createForDecryption factory method
EncryptedKeyVersion eek2 =
EncryptedKeyVersion.createForDecryption(eek.getEncryptionKeyName(),
eek.getEncryptionKeyVersionName(), eek.getEncryptedKeyIv(),
eek.getEncryptedKeyVersion().getMaterial());
// Decrypt it with the API
KeyVersion decryptedKey = kpExt.decryptEncryptedKey(eek2);
final byte[] apiMaterial = decryptedKey.getMaterial();
assertArrayEquals("Wrong key material from decryptEncryptedKey",
manualMaterial, apiMaterial);
}
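
  /**
   * Illustrative sketch (not part of the original suite): AES-CTR encryption
   * and decryption apply the same keystream XOR, so re-encrypting the
   * decrypted material with the encryption key and the derived IV should
   * reproduce the EEK bytes. Uses only APIs imported above; the method name
   * is hypothetical.
   */
  private static byte[] reEncryptSketch(KeyVersion decryptedKey,
      byte[] encryptedKeyIv) throws Exception {
    Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
    cipher.init(Cipher.ENCRYPT_MODE,
        new SecretKeySpec(encryptionKey.getMaterial(), "AES"),
        new IvParameterSpec(KeyProviderCryptoExtension.EncryptedKeyVersion
            .deriveIV(encryptedKeyIv)));
    return cipher.doFinal(decryptedKey.getMaterial());
  }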
}
| 5,657 | 40.602941 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class TestKeyShell {
private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
private PrintStream initialStdOut;
private PrintStream initialStdErr;
/* The default JCEKS provider - for testing purposes */
private String jceksProvider;
@Before
public void setup() throws Exception {
outContent.reset();
errContent.reset();
final File tmpDir = new File(System.getProperty("test.build.data", "target"),
UUID.randomUUID().toString());
if (!tmpDir.mkdirs()) {
throw new IOException("Unable to create " + tmpDir);
}
final Path jksPath = new Path(tmpDir.toString(), "keystore.jceks");
jceksProvider = "jceks://file" + jksPath.toUri();
initialStdOut = System.out;
initialStdErr = System.err;
System.setOut(new PrintStream(outContent));
System.setErr(new PrintStream(errContent));
}
@After
public void cleanUp() throws Exception {
System.setOut(initialStdOut);
System.setErr(initialStdErr);
}
/**
* Delete a key from the default jceksProvider
* @param ks The KeyShell instance
* @param keyName The key to delete
* @throws Exception
*/
private void deleteKey(KeyShell ks, String keyName) throws Exception {
int rc;
outContent.reset();
final String[] delArgs =
{"delete", keyName, "-f", "-provider", jceksProvider};
rc = ks.run(delArgs);
assertEquals(0, rc);
assertTrue(outContent.toString().contains(keyName + " has been " +
"successfully deleted."));
}
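
  /**
   * Illustrative sketch (not part of the original suite): a roll helper in
   * the same style as deleteKey above, using the same CLI arguments that
   * testKeySuccessfulKeyLifecycle passes inline.
   */
  private void rollKey(KeyShell ks, String keyName) throws Exception {
    outContent.reset();
    final String[] rollArgs = {"roll", keyName, "-provider", jceksProvider};
    assertEquals(0, ks.run(rollArgs));
    assertTrue(outContent.toString().contains(keyName +
        " has been successfully rolled."));
  }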
/**
* Lists the keys in the jceksProvider
* @param ks The KeyShell instance
* @param wantMetadata True if you want metadata returned with the keys
* @return The output from the "list" call
* @throws Exception
*/
private String listKeys(KeyShell ks, boolean wantMetadata) throws Exception {
int rc;
outContent.reset();
final String[] listArgs = {"list", "-provider", jceksProvider };
final String[] listArgsM = {"list", "-metadata", "-provider", jceksProvider };
rc = ks.run(wantMetadata ? listArgsM : listArgs);
assertEquals(0, rc);
return outContent.toString();
}
@Test
public void testKeySuccessfulKeyLifecycle() throws Exception {
int rc = 0;
String keyName = "key1";
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
outContent.reset();
final String[] args1 = {"create", keyName, "-provider", jceksProvider};
rc = ks.run(args1);
assertEquals(0, rc);
assertTrue(outContent.toString().contains(keyName + " has been " +
"successfully created"));
String listOut = listKeys(ks, false);
assertTrue(listOut.contains(keyName));
listOut = listKeys(ks, true);
assertTrue(listOut.contains(keyName));
assertTrue(listOut.contains("description"));
assertTrue(listOut.contains("created"));
outContent.reset();
final String[] args2 = {"roll", keyName, "-provider", jceksProvider};
rc = ks.run(args2);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("key1 has been successfully " +
"rolled."));
deleteKey(ks, keyName);
listOut = listKeys(ks, false);
assertFalse(listOut, listOut.contains(keyName));
}
/* HADOOP-10586 KeyShell didn't allow -description. */
@Test
public void testKeySuccessfulCreationWithDescription() throws Exception {
outContent.reset();
final String[] args1 = {"create", "key1", "-provider", jceksProvider,
"-description", "someDescription"};
int rc = 0;
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("key1 has been successfully " +
"created"));
String listOut = listKeys(ks, true);
assertTrue(listOut.contains("description"));
assertTrue(listOut.contains("someDescription"));
}
@Test
public void testInvalidKeySize() throws Exception {
final String[] args1 = {"create", "key1", "-size", "56", "-provider",
jceksProvider};
int rc = 0;
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("key1 has not been created."));
}
@Test
public void testInvalidCipher() throws Exception {
final String[] args1 = {"create", "key1", "-cipher", "LJM", "-provider",
jceksProvider};
int rc = 0;
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("key1 has not been created."));
}
@Test
public void testInvalidProvider() throws Exception {
final String[] args1 = {"create", "key1", "-cipher", "AES", "-provider",
"sdff://file/tmp/keystore.jceks"};
int rc = 0;
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("There are no valid " +
"KeyProviders configured."));
}
@Test
public void testTransientProviderWarning() throws Exception {
final String[] args1 = {"create", "key1", "-cipher", "AES", "-provider",
"user:///"};
int rc = 0;
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("WARNING: you are modifying a " +
"transient provider."));
}
@Test
public void testTransientProviderOnlyConfig() throws Exception {
final String[] args1 = {"create", "key1"};
int rc = 0;
KeyShell ks = new KeyShell();
Configuration config = new Configuration();
config.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///");
ks.setConf(config);
rc = ks.run(args1);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("There are no valid " +
"KeyProviders configured."));
}
@Test
public void testFullCipher() throws Exception {
final String keyName = "key1";
final String[] args1 = {"create", keyName, "-cipher", "AES/CBC/pkcs5Padding",
"-provider", jceksProvider};
int rc = 0;
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
assertEquals(0, rc);
assertTrue(outContent.toString().contains(keyName + " has been " +
"successfully created"));
deleteKey(ks, keyName);
}
@Test
public void testAttributes() throws Exception {
int rc;
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
/* Simple creation test */
final String[] args1 = {"create", "keyattr1", "-provider", jceksProvider,
"-attr", "foo=bar"};
rc = ks.run(args1);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("keyattr1 has been " +
"successfully created"));
/* ...and list to see that we have the attr */
String listOut = listKeys(ks, true);
assertTrue(listOut.contains("keyattr1"));
assertTrue(listOut.contains("attributes: [foo=bar]"));
/* Negative tests: no attribute */
outContent.reset();
final String[] args2 = {"create", "keyattr2", "-provider", jceksProvider,
"-attr", "=bar"};
rc = ks.run(args2);
assertEquals(1, rc);
/* Not in attribute = value form */
outContent.reset();
args2[5] = "foo";
rc = ks.run(args2);
assertEquals(1, rc);
/* No attribute or value */
outContent.reset();
args2[5] = "=";
rc = ks.run(args2);
assertEquals(1, rc);
/* Legal: attribute is a, value is b=c */
outContent.reset();
args2[5] = "a=b=c";
rc = ks.run(args2);
assertEquals(0, rc);
listOut = listKeys(ks, true);
assertTrue(listOut.contains("keyattr2"));
assertTrue(listOut.contains("attributes: [a=b=c]"));
/* Test several attrs together... */
outContent.reset();
final String[] args3 = {"create", "keyattr3", "-provider", jceksProvider,
"-attr", "foo = bar",
"-attr", " glarch =baz ",
"-attr", "abc=def"};
rc = ks.run(args3);
assertEquals(0, rc);
/* ...and list to ensure they're there. */
listOut = listKeys(ks, true);
assertTrue(listOut.contains("keyattr3"));
assertTrue(listOut.contains("[foo=bar]"));
assertTrue(listOut.contains("[glarch=baz]"));
assertTrue(listOut.contains("[abc=def]"));
/* Negative test - repeated attributes should fail */
outContent.reset();
final String[] args4 = {"create", "keyattr4", "-provider", jceksProvider,
"-attr", "foo=bar",
"-attr", "foo=glarch"};
rc = ks.run(args4);
assertEquals(1, rc);
/* Clean up to be a good citizen */
deleteKey(ks, "keyattr1");
deleteKey(ks, "keyattr2");
deleteKey(ks, "keyattr3");
}
}
| 10,213 | 30.720497 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/FailureInjectingJavaKeyStoreProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
public class FailureInjectingJavaKeyStoreProvider extends JavaKeyStoreProvider {
public static final String SCHEME_NAME = "failjceks";
private boolean backupFail = false;
private boolean writeFail = false;
FailureInjectingJavaKeyStoreProvider(JavaKeyStoreProvider prov) {
super(prov);
}
public void setBackupFail(boolean b) {
backupFail = b;
}
  public void setWriteFail(boolean b) {
    writeFail = b;
  }
  // Failure injection methods.
@Override
public void writeToNew(Path newPath) throws IOException {
if (writeFail) {
throw new IOException("Injecting failure on write");
}
super.writeToNew(newPath);
}
@Override
public boolean backupToOld(Path oldPath) throws IOException {
if (backupFail) {
throw new IOException("Inejection Failure on backup");
}
return super.backupToOld(oldPath);
}
public static class Factory extends KeyProviderFactory {
@Override
public KeyProvider createProvider(URI providerName,
Configuration conf) throws IOException {
if (SCHEME_NAME.equals(providerName.getScheme())) {
try {
return new FailureInjectingJavaKeyStoreProvider(
(JavaKeyStoreProvider) new JavaKeyStoreProvider.Factory()
.createProvider(
new URI(providerName.toString().replace(SCHEME_NAME,
JavaKeyStoreProvider.SCHEME_NAME)), conf));
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
}
return null;
}
}
}
| 2,575 | 30.802469 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderDelegationTokenExtension.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
public class TestKeyProviderDelegationTokenExtension {
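  /**
   * Declared abstract so that Mockito supplies the remaining KeyProvider
   * plumbing when mocked; only the DelegationTokenExtension behaviour is
   * stubbed per test.
   */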
public static abstract class MockKeyProvider extends
KeyProvider implements DelegationTokenExtension {
public MockKeyProvider() {
super(new Configuration(false));
}
}
@Test
public void testCreateExtension() throws Exception {
Configuration conf = new Configuration();
Credentials credentials = new Credentials();
KeyProvider kp =
new UserProvider.Factory().createProvider(new URI("user:///"), conf);
KeyProviderDelegationTokenExtension kpDTE1 =
KeyProviderDelegationTokenExtension
.createKeyProviderDelegationTokenExtension(kp);
Assert.assertNotNull(kpDTE1);
// Default implementation should be a no-op and return null
Assert.assertNull(kpDTE1.addDelegationTokens("user", credentials));
MockKeyProvider mock = mock(MockKeyProvider.class);
Mockito.when(mock.getConf()).thenReturn(new Configuration());
when(mock.addDelegationTokens("renewer", credentials)).thenReturn(
new Token<?>[]{new Token(null, null, new Text("kind"), new Text(
"service"))}
);
KeyProviderDelegationTokenExtension kpDTE2 =
KeyProviderDelegationTokenExtension
.createKeyProviderDelegationTokenExtension(mock);
Token<?>[] tokens =
kpDTE2.addDelegationTokens("renewer", credentials);
Assert.assertNotNull(tokens);
Assert.assertEquals("kind", tokens[0].getKind().toString());
}
}
| 2,763 | 36.351351 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.ProviderUtils;
import org.junit.Test;
import java.io.IOException;
import java.net.URI;
import java.security.NoSuchAlgorithmException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertArrayEquals;
public class TestKeyProvider {
private static final String CIPHER = "AES";
@Test
public void testBuildVersionName() throws Exception {
assertEquals("/a/b@3", KeyProvider.buildVersionName("/a/b", 3));
assertEquals("/aaa@12", KeyProvider.buildVersionName("/aaa", 12));
}
@Test
public void testParseVersionName() throws Exception {
assertEquals("/a/b", KeyProvider.getBaseName("/a/b@3"));
assertEquals("/aaa", KeyProvider.getBaseName("/aaa@112"));
try {
KeyProvider.getBaseName("no-slashes");
assertTrue("should have thrown", false);
} catch (IOException e) {
assertTrue(true);
}
}
@Test
public void testKeyMaterial() throws Exception {
byte[] key1 = new byte[]{1,2,3,4};
KeyProvider.KeyVersion obj = new KeyProvider.KeyVersion("key1", "key1@1", key1);
assertEquals("key1@1", obj.getVersionName());
assertArrayEquals(new byte[]{1,2,3,4}, obj.getMaterial());
}
@Test
public void testMetadata() throws Exception {
//Metadata without description
    DateFormat format = new SimpleDateFormat("y/M/d");
Date date = format.parse("2013/12/25");
KeyProvider.Metadata meta = new KeyProvider.Metadata("myCipher", 100, null,
null, date, 123);
assertEquals("myCipher", meta.getCipher());
assertEquals(100, meta.getBitLength());
assertNull(meta.getDescription());
assertEquals(date, meta.getCreated());
assertEquals(123, meta.getVersions());
KeyProvider.Metadata second = new KeyProvider.Metadata(meta.serialize());
assertEquals(meta.getCipher(), second.getCipher());
assertEquals(meta.getBitLength(), second.getBitLength());
assertNull(second.getDescription());
assertTrue(second.getAttributes().isEmpty());
assertEquals(meta.getCreated(), second.getCreated());
assertEquals(meta.getVersions(), second.getVersions());
int newVersion = second.addVersion();
assertEquals(123, newVersion);
assertEquals(124, second.getVersions());
assertEquals(123, meta.getVersions());
//Metadata with description
    format = new SimpleDateFormat("y/M/d");
date = format.parse("2013/12/25");
Map<String, String> attributes = new HashMap<String, String>();
attributes.put("a", "A");
meta = new KeyProvider.Metadata("myCipher", 100,
"description", attributes, date, 123);
assertEquals("myCipher", meta.getCipher());
assertEquals(100, meta.getBitLength());
assertEquals("description", meta.getDescription());
assertEquals(attributes, meta.getAttributes());
assertEquals(date, meta.getCreated());
assertEquals(123, meta.getVersions());
second = new KeyProvider.Metadata(meta.serialize());
assertEquals(meta.getCipher(), second.getCipher());
assertEquals(meta.getBitLength(), second.getBitLength());
assertEquals(meta.getDescription(), second.getDescription());
assertEquals(meta.getAttributes(), second.getAttributes());
assertEquals(meta.getCreated(), second.getCreated());
assertEquals(meta.getVersions(), second.getVersions());
newVersion = second.addVersion();
assertEquals(123, newVersion);
assertEquals(124, second.getVersions());
assertEquals(123, meta.getVersions());
}
@Test
public void testOptions() throws Exception {
Configuration conf = new Configuration();
conf.set(KeyProvider.DEFAULT_CIPHER_NAME, "myCipher");
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 512);
Map<String, String> attributes = new HashMap<String, String>();
attributes.put("a", "A");
KeyProvider.Options options = KeyProvider.options(conf);
assertEquals("myCipher", options.getCipher());
assertEquals(512, options.getBitLength());
options.setCipher("yourCipher");
options.setDescription("description");
options.setAttributes(attributes);
options.setBitLength(128);
assertEquals("yourCipher", options.getCipher());
assertEquals(128, options.getBitLength());
assertEquals("description", options.getDescription());
assertEquals(attributes, options.getAttributes());
options = KeyProvider.options(new Configuration());
assertEquals(KeyProvider.DEFAULT_CIPHER, options.getCipher());
assertEquals(KeyProvider.DEFAULT_BITLENGTH, options.getBitLength());
}
@Test
public void testUnnestUri() throws Exception {
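    // unnestUri unwraps one level of scheme nesting: the "user info" before
    // the first '@' becomes the inner scheme, as the cases below demonstrate.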
assertEquals(new Path("hdfs://nn.example.com/my/path"),
ProviderUtils.unnestUri(new URI("myscheme://[email protected]/my/path")));
assertEquals(new Path("hdfs://nn/my/path?foo=bar&baz=bat#yyy"),
ProviderUtils.unnestUri(new URI("myscheme://hdfs@nn/my/path?foo=bar&baz=bat#yyy")));
assertEquals(new Path("inner://[email protected]/my/path"),
ProviderUtils.unnestUri(new URI("outer://inner@[email protected]/my/path")));
assertEquals(new Path("user:///"),
ProviderUtils.unnestUri(new URI("outer://user/")));
}
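  /**
   * Minimal in-memory KeyProvider stub: it records the material, key size
   * and algorithm handed to it so the tests below can assert on what the
   * base class generated.
   */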
private static class MyKeyProvider extends KeyProvider {
private String algorithm;
private int size;
private byte[] material;
public MyKeyProvider(Configuration conf) {
super(conf);
}
@Override
public KeyVersion getKeyVersion(String versionName)
throws IOException {
return null;
}
@Override
public List<String> getKeys() throws IOException {
return null;
}
@Override
public List<KeyVersion> getKeyVersions(String name)
throws IOException {
return null;
}
@Override
public Metadata getMetadata(String name) throws IOException {
return new Metadata(CIPHER, 128, "description", null, new Date(), 0);
}
@Override
public KeyVersion createKey(String name, byte[] material,
Options options) throws IOException {
this.material = material;
return null;
}
@Override
public void deleteKey(String name) throws IOException {
}
@Override
public KeyVersion rollNewVersion(String name, byte[] material)
throws IOException {
this.material = material;
return null;
}
@Override
public void flush() throws IOException {
}
@Override
protected byte[] generateKey(int size, String algorithm)
throws NoSuchAlgorithmException {
this.size = size;
this.algorithm = algorithm;
return super.generateKey(size, algorithm);
}
}
@Test
public void testMaterialGeneration() throws Exception {
MyKeyProvider kp = new MyKeyProvider(new Configuration());
KeyProvider.Options options = new KeyProvider.Options(new Configuration());
options.setCipher(CIPHER);
options.setBitLength(128);
kp.createKey("hello", options);
Assert.assertEquals(128, kp.size);
Assert.assertEquals(CIPHER, kp.algorithm);
Assert.assertNotNull(kp.material);
kp = new MyKeyProvider(new Configuration());
kp.rollNewVersion("hello");
Assert.assertEquals(128, kp.size);
Assert.assertEquals(CIPHER, kp.algorithm);
Assert.assertNotNull(kp.material);
}
@Test
public void testConfiguration() throws Exception {
Configuration conf = new Configuration(false);
conf.set("a", "A");
MyKeyProvider kp = new MyKeyProvider(conf);
Assert.assertEquals("A", kp.getConf().get("a"));
}
}
| 8,688 | 34.03629 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
public class TestKeyProviderFactory {
private FileSystemTestHelper fsHelper;
private File testRootDir;
@Before
public void setup() {
fsHelper = new FileSystemTestHelper();
String testRoot = fsHelper.getTestRootDir();
testRootDir = new File(testRoot).getAbsoluteFile();
}
@Test
public void testFactory() throws Exception {
Configuration conf = new Configuration();
final String userUri = UserProvider.SCHEME_NAME + ":///";
final Path jksPath = new Path(testRootDir.toString(), "test.jks");
final String jksUri = JavaKeyStoreProvider.SCHEME_NAME +
"://file" + jksPath.toUri().toString();
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
userUri + "," + jksUri);
List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
assertEquals(2, providers.size());
assertEquals(UserProvider.class, providers.get(0).getClass());
assertEquals(JavaKeyStoreProvider.class, providers.get(1).getClass());
assertEquals(userUri, providers.get(0).toString());
assertEquals(jksUri, providers.get(1).toString());
}
@Test
public void testFactoryErrors() throws Exception {
Configuration conf = new Configuration();
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "unknown:///");
try {
List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
assertTrue("should throw!", false);
} catch (IOException e) {
assertEquals("No KeyProviderFactory for unknown:/// in " +
KeyProviderFactory.KEY_PROVIDER_PATH,
e.getMessage());
}
}
@Test
public void testUriErrors() throws Exception {
Configuration conf = new Configuration();
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "unkn@own:/x/y");
try {
List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
assertTrue("should throw!", false);
} catch (IOException e) {
assertEquals("Bad configuration of " +
KeyProviderFactory.KEY_PROVIDER_PATH +
" at unkn@own:/x/y", e.getMessage());
}
}
static void checkSpecificProvider(Configuration conf,
String ourUrl) throws Exception {
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
byte[] key1 = new byte[16];
byte[] key2 = new byte[16];
byte[] key3 = new byte[16];
for(int i =0; i < key1.length; ++i) {
key1[i] = (byte) i;
key2[i] = (byte) (i * 2);
key3[i] = (byte) (i * 3);
}
// ensure that we get nulls when the key isn't there
assertEquals(null, provider.getKeyVersion("no-such-key"));
assertEquals(null, provider.getMetadata("key"));
// create a new key
try {
provider.createKey("key3", key3, KeyProvider.options(conf));
} catch (Exception e) {
e.printStackTrace();
throw e;
}
// check the metadata for key3
KeyProvider.Metadata meta = provider.getMetadata("key3");
assertEquals(KeyProvider.DEFAULT_CIPHER, meta.getCipher());
assertEquals(KeyProvider.DEFAULT_BITLENGTH, meta.getBitLength());
assertEquals(1, meta.getVersions());
// make sure we get back the right key
assertArrayEquals(key3, provider.getCurrentKey("key3").getMaterial());
assertEquals("key3@0", provider.getCurrentKey("key3").getVersionName());
// try recreating key3
try {
provider.createKey("key3", key3, KeyProvider.options(conf));
assertTrue("should throw", false);
} catch (IOException e) {
assertEquals("Key key3 already exists in " + ourUrl, e.getMessage());
}
provider.deleteKey("key3");
try {
provider.deleteKey("key3");
assertTrue("should throw", false);
} catch (IOException e) {
assertEquals("Key key3 does not exist in " + ourUrl, e.getMessage());
}
provider.createKey("key3", key3, KeyProvider.options(conf));
try {
provider.createKey("key4", key3,
KeyProvider.options(conf).setBitLength(8));
assertTrue("should throw", false);
} catch (IOException e) {
assertEquals("Wrong key length. Required 8, but got 128", e.getMessage());
}
provider.createKey("key4", new byte[]{1},
KeyProvider.options(conf).setBitLength(8));
provider.rollNewVersion("key4", new byte[]{2});
meta = provider.getMetadata("key4");
assertEquals(2, meta.getVersions());
assertArrayEquals(new byte[]{2},
provider.getCurrentKey("key4").getMaterial());
assertArrayEquals(new byte[]{1},
provider.getKeyVersion("key4@0").getMaterial());
assertEquals("key4@1", provider.getCurrentKey("key4").getVersionName());
try {
provider.rollNewVersion("key4", key1);
assertTrue("should throw", false);
} catch (IOException e) {
assertEquals("Wrong key length. Required 8, but got 128", e.getMessage());
}
try {
provider.rollNewVersion("no-such-key", key1);
assertTrue("should throw", false);
} catch (IOException e) {
assertEquals("Key no-such-key not found", e.getMessage());
}
provider.flush();
// get a new instance of the provider to ensure it was saved correctly
provider = KeyProviderFactory.getProviders(conf).get(0);
assertArrayEquals(new byte[]{2},
provider.getCurrentKey("key4").getMaterial());
assertArrayEquals(key3, provider.getCurrentKey("key3").getMaterial());
assertEquals("key3@0", provider.getCurrentKey("key3").getVersionName());
List<String> keys = provider.getKeys();
assertTrue("Keys should have been returned.", keys.size() == 2);
assertTrue("Returned Keys should have included key3.", keys.contains("key3"));
assertTrue("Returned Keys should have included key4.", keys.contains("key4"));
List<KeyVersion> kvl = provider.getKeyVersions("key3");
assertTrue("KeyVersions should have been returned for key3.", kvl.size() == 1);
assertTrue("KeyVersions should have included key3@0.", kvl.get(0).getVersionName().equals("key3@0"));
assertArrayEquals(key3, kvl.get(0).getMaterial());
}
@Test
public void testUserProvider() throws Exception {
Configuration conf = new Configuration();
final String ourUrl = UserProvider.SCHEME_NAME + ":///";
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl);
checkSpecificProvider(conf, ourUrl);
// see if the credentials are actually in the UGI
Credentials credentials =
UserGroupInformation.getCurrentUser().getCredentials();
assertArrayEquals(new byte[]{1},
credentials.getSecretKey(new Text("key4@0")));
assertArrayEquals(new byte[]{2},
credentials.getSecretKey(new Text("key4@1")));
}
@Test
public void testJksProvider() throws Exception {
Configuration conf = new Configuration();
final Path jksPath = new Path(testRootDir.toString(), "test.jks");
final String ourUrl =
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
File file = new File(testRootDir, "test.jks");
file.delete();
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl);
checkSpecificProvider(conf, ourUrl);
// START : Test flush error by failure injection
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl.replace(
JavaKeyStoreProvider.SCHEME_NAME,
FailureInjectingJavaKeyStoreProvider.SCHEME_NAME));
// get a new instance of the provider to ensure it was saved correctly
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
// inject failure during keystore write
FailureInjectingJavaKeyStoreProvider fProvider =
(FailureInjectingJavaKeyStoreProvider) provider;
fProvider.setWriteFail(true);
provider.createKey("key5", new byte[]{1},
KeyProvider.options(conf).setBitLength(8));
assertNotNull(provider.getCurrentKey("key5"));
try {
provider.flush();
Assert.fail("Should not succeed");
} catch (Exception e) {
// Ignore
}
    // Should be reset to pre-flush state
Assert.assertNull(provider.getCurrentKey("key5"));
// Un-inject last failure and
// inject failure during keystore backup
fProvider.setWriteFail(false);
fProvider.setBackupFail(true);
provider.createKey("key6", new byte[]{1},
KeyProvider.options(conf).setBitLength(8));
assertNotNull(provider.getCurrentKey("key6"));
try {
provider.flush();
Assert.fail("Should not succeed");
} catch (Exception e) {
// Ignore
}
    // Should be reset to pre-flush state
Assert.assertNull(provider.getCurrentKey("key6"));
// END : Test flush error by failure injection
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl.replace(
FailureInjectingJavaKeyStoreProvider.SCHEME_NAME,
JavaKeyStoreProvider.SCHEME_NAME));
Path path = ProviderUtils.unnestUri(new URI(ourUrl));
FileSystem fs = path.getFileSystem(conf);
FileStatus s = fs.getFileStatus(path);
assertTrue(s.getPermission().toString().equals("rwx------"));
assertTrue(file + " should exist", file.isFile());
// Corrupt file and Check if JKS can reload from _OLD file
File oldFile = new File(file.getPath() + "_OLD");
file.renameTo(oldFile);
file.delete();
file.createNewFile();
assertTrue(oldFile.exists());
provider = KeyProviderFactory.getProviders(conf).get(0);
assertTrue(file.exists());
assertTrue(oldFile + "should be deleted", !oldFile.exists());
verifyAfterReload(file, provider);
assertTrue(!oldFile.exists());
// _NEW and current file should not exist together
File newFile = new File(file.getPath() + "_NEW");
newFile.createNewFile();
try {
provider = KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("_NEW and current file should not exist together !!");
} catch (Exception e) {
// Ignore
} finally {
if (newFile.exists()) {
newFile.delete();
}
}
// Load from _NEW file
file.renameTo(newFile);
file.delete();
try {
provider = KeyProviderFactory.getProviders(conf).get(0);
Assert.assertFalse(newFile.exists());
Assert.assertFalse(oldFile.exists());
} catch (Exception e) {
Assert.fail("JKS should load from _NEW file !!");
// Ignore
}
verifyAfterReload(file, provider);
// _NEW exists but corrupt.. must load from _OLD
newFile.createNewFile();
file.renameTo(oldFile);
file.delete();
try {
provider = KeyProviderFactory.getProviders(conf).get(0);
Assert.assertFalse(newFile.exists());
Assert.assertFalse(oldFile.exists());
} catch (Exception e) {
Assert.fail("JKS should load from _OLD file !!");
// Ignore
} finally {
if (newFile.exists()) {
newFile.delete();
}
}
verifyAfterReload(file, provider);
// check permission retention after explicit change
fs.setPermission(path, new FsPermission("777"));
checkPermissionRetention(conf, ourUrl, path);
// Check that an uppercase keyname results in an error
provider = KeyProviderFactory.getProviders(conf).get(0);
try {
provider.createKey("UPPERCASE", KeyProvider.options(conf));
Assert.fail("Expected failure on creating key name with uppercase " +
"characters");
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Uppercase key names", e);
}
}
private void verifyAfterReload(File file, KeyProvider provider)
throws IOException {
List<String> existingKeys = provider.getKeys();
assertTrue(existingKeys.contains("key4"));
assertTrue(existingKeys.contains("key3"));
assertTrue(file.exists());
}
public void checkPermissionRetention(Configuration conf, String ourUrl, Path path) throws Exception {
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
// let's add a new key and flush and check that permissions are still set to 777
byte[] key = new byte[16];
for(int i =0; i < key.length; ++i) {
key[i] = (byte) i;
}
// create a new key
try {
provider.createKey("key5", key, KeyProvider.options(conf));
} catch (Exception e) {
e.printStackTrace();
throw e;
}
provider.flush();
// get a new instance of the provider to ensure it was saved correctly
provider = KeyProviderFactory.getProviders(conf).get(0);
assertArrayEquals(key, provider.getCurrentKey("key5").getMaterial());
FileSystem fs = path.getFileSystem(conf);
FileStatus s = fs.getFileStatus(path);
assertTrue("Permissions should have been retained from the preexisting keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
}
@Test
public void testJksProviderPasswordViaConfig() throws Exception {
Configuration conf = new Configuration();
final Path jksPath = new Path(testRootDir.toString(), "test.jks");
final String ourUrl =
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
File file = new File(testRootDir, "test.jks");
file.delete();
try {
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl);
conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,
"javakeystoreprovider.password");
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
provider.createKey("key3", new byte[16], KeyProvider.options(conf));
provider.flush();
} catch (Exception ex) {
Assert.fail("could not create keystore with password file");
}
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
Assert.assertNotNull(provider.getCurrentKey("key3"));
try {
conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY, "bar");
KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("using non existing password file, it should fail");
} catch (IOException ex) {
//NOP
}
try {
conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY, "core-site.xml");
KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("using different password file, it should fail");
} catch (IOException ex) {
//NOP
}
try {
conf.unset(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY);
KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("No password file property, env not set, it should fail");
} catch (IOException ex) {
//NOP
}
}
@Test
public void testGetProviderViaURI() throws Exception {
Configuration conf = new Configuration(false);
final Path jksPath = new Path(testRootDir.toString(), "test.jks");
URI uri = new URI(JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
KeyProvider kp = KeyProviderFactory.get(uri, conf);
Assert.assertNotNull(kp);
Assert.assertEquals(JavaKeyStoreProvider.class, kp.getClass());
uri = new URI("foo://bar");
kp = KeyProviderFactory.get(uri, conf);
Assert.assertNull(kp);
}
@Test
public void testJksProviderWithKeytoolKeys() throws Exception {
final Configuration conf = new Configuration();
final String keystoreDirAbsolutePath =
conf.getResource("hdfs7067.keystore").getPath();
final String ourUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file@/" +
keystoreDirAbsolutePath;
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl);
final KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
// Sanity check that we are using the right keystore
@SuppressWarnings("unused")
final KeyProvider.KeyVersion keyVersion =
provider.getKeyVersion("testkey5@0");
try {
@SuppressWarnings("unused")
final KeyProvider.KeyVersion keyVersionWrongKeyNameFormat =
provider.getKeyVersion("testkey2");
fail("should have thrown an exception");
} catch (IOException e) {
// No version in key path testkey2/
GenericTestUtils.assertExceptionContains("No version in key path", e);
}
try {
@SuppressWarnings("unused")
final KeyProvider.KeyVersion keyVersionCurrentKeyNotWrongKeyNameFormat =
provider.getCurrentKey("testkey5@0");
fail("should have thrown an exception getting testkey5@0");
} catch (IOException e) {
// javax.crypto.spec.SecretKeySpec cannot be cast to
// org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata
GenericTestUtils.assertExceptionContains("other non-Hadoop method", e);
}
try {
@SuppressWarnings("unused")
KeyProvider.KeyVersion keyVersionCurrentKeyNotReally =
provider.getCurrentKey("testkey2");
fail("should have thrown an exception getting testkey2");
} catch (IOException e) {
// javax.crypto.spec.SecretKeySpec cannot be cast to
// org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata
GenericTestUtils.assertExceptionContains("other non-Hadoop method", e);
}
}
}
| 18,674 | 37.744813 | 137 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.IOException;
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.crypto.key.kms.ValueQueue;
import org.apache.hadoop.crypto.key.kms.ValueQueue.QueueRefiller;
import org.apache.hadoop.crypto.key.kms.ValueQueue.SyncGenerationPolicy;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.collect.Sets;
public class TestValueQueue {
private static class FillInfo {
final int num;
final String key;
FillInfo(int num, String key) {
this.num = num;
this.key = key;
}
}
private static class MockFiller implements QueueRefiller<String> {
final LinkedBlockingQueue<FillInfo> fillCalls =
new LinkedBlockingQueue<FillInfo>();
@Override
public void fillQueueForKey(String keyName, Queue<String> keyQueue,
int numValues) throws IOException {
fillCalls.add(new FillInfo(numValues, keyName));
for(int i = 0; i < numValues; i++) {
keyQueue.add("test");
}
}
public FillInfo getTop() throws InterruptedException {
return fillCalls.poll(500, TimeUnit.MILLISECONDS);
}
}
/**
* Verifies that Queue is initially filled to "numInitValues"
*/
@Test
public void testInitFill() throws Exception {
MockFiller filler = new MockFiller();
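    // Constructor args, as a reading of the ValueQueue signature:
    // numValues=10, lowWatermark=0.1f, expiry=300, numFillerThreads=1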
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.1f, 300, 1,
SyncGenerationPolicy.ALL, filler);
Assert.assertEquals("test", vq.getNext("k1"));
Assert.assertEquals(1, filler.getTop().num);
vq.shutdown();
}
/**
* Verifies that Queue is initialized (Warmed-up) for provided keys
*/
@Test
public void testWarmUp() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.5f, 300, 1,
SyncGenerationPolicy.ALL, filler);
vq.initializeQueuesForKeys("k1", "k2", "k3");
FillInfo[] fillInfos =
{filler.getTop(), filler.getTop(), filler.getTop()};
Assert.assertEquals(5, fillInfos[0].num);
Assert.assertEquals(5, fillInfos[1].num);
Assert.assertEquals(5, fillInfos[2].num);
Assert.assertEquals(Sets.newHashSet("k1", "k2", "k3"),
Sets.newHashSet(fillInfos[0].key,
fillInfos[1].key,
fillInfos[2].key));
vq.shutdown();
}
  /**
   * Verifies that the refill task is executed after "checkInterval" if
   * the number of values falls below "lowWatermark"
   */
@Test
public void testRefill() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.1f, 300, 1,
SyncGenerationPolicy.ALL, filler);
Assert.assertEquals("test", vq.getNext("k1"));
Assert.assertEquals(1, filler.getTop().num);
    // Trigger refill
    vq.getNext("k1");
    // Synchronous fill servicing the getNext call above
    Assert.assertEquals(1, filler.getTop().num);
    // Scheduled (async) refill tops the queue back up to "numValues"
    Assert.assertEquals(10, filler.getTop().num);
vq.shutdown();
}
  /**
   * Verifies that no refill happens after "checkInterval" if
   * the number of values is above "lowWatermark"
   */
@Test
public void testNoRefill() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.5f, 300, 1,
SyncGenerationPolicy.ALL, filler);
Assert.assertEquals("test", vq.getNext("k1"));
Assert.assertEquals(5, filler.getTop().num);
Assert.assertEquals(null, filler.getTop());
vq.shutdown();
}
/**
* Verify getAtMost when SyncGeneration Policy = ALL
*/
@Test
public void testgetAtMostPolicyALL() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.1f, 300, 1,
SyncGenerationPolicy.ALL, filler);
Assert.assertEquals("test", vq.getNext("k1"));
Assert.assertEquals(1, filler.getTop().num);
// Drain completely
Assert.assertEquals(10, vq.getAtMost("k1", 10).size());
// Synchronous call
Assert.assertEquals(10, filler.getTop().num);
// Ask for more... return all
Assert.assertEquals(19, vq.getAtMost("k1", 19).size());
// Synchronous call (No Async call since num > lowWatermark)
Assert.assertEquals(19, filler.getTop().num);
vq.shutdown();
}
  /**
   * Verify getAtMost when SyncGeneration Policy = ATLEAST_ONE
   */
@Test
public void testgetAtMostPolicyATLEAST_ONE() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.3f, 300, 1,
SyncGenerationPolicy.ATLEAST_ONE, filler);
Assert.assertEquals("test", vq.getNext("k1"));
Assert.assertEquals(3, filler.getTop().num);
// Drain completely
Assert.assertEquals(2, vq.getAtMost("k1", 10).size());
    // Async refill call
Assert.assertEquals(10, filler.getTop().num);
vq.shutdown();
}
/**
* Verify getAtMost when SyncGeneration Policy = LOW_WATERMARK
*/
@Test
public void testgetAtMostPolicyLOW_WATERMARK() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.3f, 300, 1,
SyncGenerationPolicy.LOW_WATERMARK, filler);
Assert.assertEquals("test", vq.getNext("k1"));
Assert.assertEquals(3, filler.getTop().num);
// Drain completely
Assert.assertEquals(3, vq.getAtMost("k1", 10).size());
// Synchronous call
Assert.assertEquals(1, filler.getTop().num);
    // Async refill call
Assert.assertEquals(10, filler.getTop().num);
vq.shutdown();
}
@Test
public void testDrain() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.1f, 300, 1,
SyncGenerationPolicy.ALL, filler);
Assert.assertEquals("test", vq.getNext("k1"));
Assert.assertEquals(1, filler.getTop().num);
vq.drain("k1");
Assert.assertNull(filler.getTop());
vq.shutdown();
}
}
| 6,808 | 32.214634 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.URI;
import java.security.NoSuchAlgorithmException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider.Options;
import org.junit.Test;
import org.mockito.Mockito;
import com.google.common.collect.Sets;
public class TestLoadBalancingKMSClientProvider {
@Test
public void testCreation() throws Exception {
Configuration conf = new Configuration();
KeyProvider kp = new KMSClientProvider.Factory().createProvider(new URI(
"kms://http@host1/kms/foo"), conf);
assertTrue(kp instanceof KMSClientProvider);
assertEquals("http://host1/kms/foo/v1/",
((KMSClientProvider) kp).getKMSUrl());
kp = new KMSClientProvider.Factory().createProvider(new URI(
"kms://http@host1;host2;host3/kms/foo"), conf);
assertTrue(kp instanceof LoadBalancingKMSClientProvider);
KMSClientProvider[] providers =
((LoadBalancingKMSClientProvider) kp).getProviders();
assertEquals(3, providers.length);
assertEquals(Sets.newHashSet("http://host1/kms/foo/v1/",
"http://host2/kms/foo/v1/",
"http://host3/kms/foo/v1/"),
Sets.newHashSet(providers[0].getKMSUrl(),
providers[1].getKMSUrl(),
providers[2].getKMSUrl()));
kp = new KMSClientProvider.Factory().createProvider(new URI(
"kms://http@host1;host2;host3:16000/kms/foo"), conf);
assertTrue(kp instanceof LoadBalancingKMSClientProvider);
providers =
((LoadBalancingKMSClientProvider) kp).getProviders();
assertEquals(3, providers.length);
assertEquals(Sets.newHashSet("http://host1:16000/kms/foo/v1/",
"http://host2:16000/kms/foo/v1/",
"http://host3:16000/kms/foo/v1/"),
Sets.newHashSet(providers[0].getKMSUrl(),
providers[1].getKMSUrl(),
providers[2].getKMSUrl()));
}
@Test
public void testLoadBalancing() throws Exception {
Configuration conf = new Configuration();
KMSClientProvider p1 = mock(KMSClientProvider.class);
when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenReturn(
new KMSClientProvider.KMSKeyVersion("p1", "v1", new byte[0]));
KMSClientProvider p2 = mock(KMSClientProvider.class);
when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenReturn(
new KMSClientProvider.KMSKeyVersion("p2", "v2", new byte[0]));
KMSClientProvider p3 = mock(KMSClientProvider.class);
when(p3.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenReturn(
new KMSClientProvider.KMSKeyVersion("p3", "v3", new byte[0]));
KeyProvider kp = new LoadBalancingKMSClientProvider(
new KMSClientProvider[] { p1, p2, p3 }, 0, conf);
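    // The 0 here appears to seed the starting index; the assertions below
    // show calls rotating round-robin from p1.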
assertEquals("p1", kp.createKey("test1", new Options(conf)).getName());
assertEquals("p2", kp.createKey("test2", new Options(conf)).getName());
assertEquals("p3", kp.createKey("test3", new Options(conf)).getName());
assertEquals("p1", kp.createKey("test4", new Options(conf)).getName());
}
@Test
public void testLoadBalancingWithFailure() throws Exception {
Configuration conf = new Configuration();
KMSClientProvider p1 = mock(KMSClientProvider.class);
when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenReturn(
new KMSClientProvider.KMSKeyVersion("p1", "v1", new byte[0]));
when(p1.getKMSUrl()).thenReturn("p1");
// This should not be retried
KMSClientProvider p2 = mock(KMSClientProvider.class);
when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenThrow(new NoSuchAlgorithmException("p2"));
when(p2.getKMSUrl()).thenReturn("p2");
KMSClientProvider p3 = mock(KMSClientProvider.class);
when(p3.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenReturn(
new KMSClientProvider.KMSKeyVersion("p3", "v3", new byte[0]));
when(p3.getKMSUrl()).thenReturn("p3");
// This should be retried
KMSClientProvider p4 = mock(KMSClientProvider.class);
when(p4.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenThrow(new IOException("p4"));
when(p4.getKMSUrl()).thenReturn("p4");
KeyProvider kp = new LoadBalancingKMSClientProvider(
new KMSClientProvider[] { p1, p2, p3, p4 }, 0, conf);
assertEquals("p1", kp.createKey("test4", new Options(conf)).getName());
// Exceptions other than IOExceptions will not be retried
try {
kp.createKey("test1", new Options(conf)).getName();
fail("Should fail since its not an IOException");
} catch (Exception e) {
assertTrue(e instanceof NoSuchAlgorithmException);
}
assertEquals("p3", kp.createKey("test2", new Options(conf)).getName());
// IOException will trigger retry in next provider
assertEquals("p1", kp.createKey("test3", new Options(conf)).getName());
}
@Test
public void testLoadBalancingWithAllBadNodes() throws Exception {
Configuration conf = new Configuration();
KMSClientProvider p1 = mock(KMSClientProvider.class);
when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenThrow(new IOException("p1"));
KMSClientProvider p2 = mock(KMSClientProvider.class);
when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenThrow(new IOException("p2"));
KMSClientProvider p3 = mock(KMSClientProvider.class);
when(p3.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenThrow(new IOException("p3"));
KMSClientProvider p4 = mock(KMSClientProvider.class);
when(p4.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenThrow(new IOException("p4"));
when(p1.getKMSUrl()).thenReturn("p1");
when(p2.getKMSUrl()).thenReturn("p2");
when(p3.getKMSUrl()).thenReturn("p3");
when(p4.getKMSUrl()).thenReturn("p4");
KeyProvider kp = new LoadBalancingKMSClientProvider(
new KMSClientProvider[] { p1, p2, p3, p4 }, 0, conf);
try {
kp.createKey("test3", new Options(conf)).getName();
fail("Should fail since all providers threw an IOException");
} catch (Exception e) {
assertTrue(e instanceof IOException);
}
}
}
| 7,412 | 43.389222 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.security;
import junit.framework.TestCase;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.File;
import java.io.FileWriter;
import java.io.Writer;
import java.util.Map;
public class TestAuthenticationFilter extends TestCase {
@SuppressWarnings("unchecked")
public void testConfiguration() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.http.authentication.foo", "bar");
conf.set(HttpServer2.BIND_ADDRESS, "barhost");
FilterContainer container = Mockito.mock(FilterContainer.class);
Mockito.doAnswer(
new Answer() {
@Override
public Object answer(InvocationOnMock invocationOnMock)
throws Throwable {
Object[] args = invocationOnMock.getArguments();
assertEquals("authentication", args[0]);
assertEquals(AuthenticationFilter.class.getName(), args[1]);
Map<String, String> conf = (Map<String, String>) args[2];
assertEquals("/", conf.get("cookie.path"));
assertEquals("simple", conf.get("type"));
assertEquals("36000", conf.get("token.validity"));
assertNull(conf.get("cookie.domain"));
assertEquals("true", conf.get("simple.anonymous.allowed"));
assertEquals("HTTP/barhost@LOCALHOST",
conf.get("kerberos.principal"));
assertEquals(System.getProperty("user.home") +
"/hadoop.keytab", conf.get("kerberos.keytab"));
assertEquals("bar", conf.get("foo"));
return null;
}
}
).when(container).addFilter(Mockito.<String>anyObject(),
Mockito.<String>anyObject(),
Mockito.<Map<String, String>>anyObject());
new AuthenticationFilterInitializer().initFilter(container, conf);
}
}
| 2,951 | 36.367089 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.net.InetAddress;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.WhitelistBasedResolver;
import org.apache.hadoop.util.TestFileBasedIPList;
public class TestWhitelistBasedResolver extends TestCase {
public static final Map<String, String> SASL_PRIVACY_PROPS =
WhitelistBasedResolver.getSaslProperties(new Configuration());
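  /**
   * Add a bunch of subnets and IPs to both the fixed and variable whitelists.
   * Check that whitelisted addresses resolve to the default properties
   * and that all other addresses get the SASL privacy properties.
   */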
public void testFixedVariableAndLocalWhiteList() throws IOException {
String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
TestFileBasedIPList.createFileWithEntries ("fixedwhitelist.txt", fixedIps);
String[] variableIps = {"10.222.0.0/16", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("variablewhitelist.txt", variableIps);
Configuration conf = new Configuration();
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE ,
"fixedwhitelist.txt");
conf.setBoolean(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE,
true);
conf.setLong(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS,
1);
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE ,
"variablewhitelist.txt");
WhitelistBasedResolver wqr = new WhitelistBasedResolver ();
wqr.setConf(conf);
assertEquals (wqr.getDefaultProperties(),
wqr.getServerProperties(InetAddress.getByName("10.119.103.112")));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.119.103.113"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("10.221.103.121"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.221.104.0"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("10.222.103.121"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.223.104.0"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("10.113.221.221"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.222"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("127.0.0.1"));
TestFileBasedIPList.removeFile("fixedwhitelist.txt");
TestFileBasedIPList.removeFile("variablewhitelist.txt");
}
/**
 * Add a bunch of subnets and IPs to the whitelist
* Check for inclusion in whitelist
* Check for exclusion from whitelist
*/
public void testFixedAndLocalWhiteList() throws IOException {
String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
TestFileBasedIPList.createFileWithEntries ("fixedwhitelist.txt", fixedIps);
String[] variableIps = {"10.222.0.0/16", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("variablewhitelist.txt", variableIps);
Configuration conf = new Configuration();
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE ,
"fixedwhitelist.txt");
conf.setBoolean(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE,
false);
conf.setLong(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS,
100);
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE ,
"variablewhitelist.txt");
WhitelistBasedResolver wqr = new WhitelistBasedResolver();
wqr.setConf(conf);
assertEquals (wqr.getDefaultProperties(),
wqr.getServerProperties(InetAddress.getByName("10.119.103.112")));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.119.103.113"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("10.221.103.121"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.221.104.0"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.222.103.121"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.223.104.0"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.221"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.222"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("127.0.0.1"));;
TestFileBasedIPList.removeFile("fixedwhitelist.txt");
TestFileBasedIPList.removeFile("variablewhitelist.txt");
}
/**
 * Add a bunch of subnets and IPs to the whitelist
* Check for inclusion in whitelist with a null value
*/
public void testNullIPAddress() throws IOException {
String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
TestFileBasedIPList.createFileWithEntries ("fixedwhitelist.txt", fixedIps);
String[] variableIps = {"10.222.0.0/16", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("variablewhitelist.txt", variableIps);
Configuration conf = new Configuration();
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE ,
"fixedwhitelist.txt");
conf.setBoolean(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE,
true);
conf.setLong(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS,
100);
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE ,
"variablewhitelist.txt");
WhitelistBasedResolver wqr = new WhitelistBasedResolver();
wqr.setConf(conf);
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties((InetAddress)null));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties((String)null));
TestFileBasedIPList.removeFile("fixedwhitelist.txt");
TestFileBasedIPList.removeFile("variablewhitelist.txt");
}
}
| 6,503 | 38.658537 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.minikdc.MiniKdc;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.File;
/**
* Verify UGI login from keytab. Check that the UGI is
* configured to use keytab to catch regressions like
* HADOOP-10786.
*/
public class TestUGILoginFromKeytab {
private MiniKdc kdc;
private File workDir;
@Rule
public final TemporaryFolder folder = new TemporaryFolder();
@Before
public void startMiniKdc() throws Exception {
// This setting below is required. If not enabled, UGI will abort
// any attempt to loginUserFromKeytab.
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
workDir = folder.getRoot();
kdc = new MiniKdc(MiniKdc.createConf(), workDir);
kdc.start();
}
@After
public void stopMiniKdc() {
if (kdc != null) {
kdc.stop();
}
}
/**
* Login from keytab using the MiniKDC and verify the UGI can successfully
* relogin from keytab as well. This will catch regressions like HADOOP-10786.
*/
@Test
public void testUGILoginFromKeytab() throws Exception {
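    // Allow an immediate relogin; by default UGI enforces a minimum
    // interval between keytab logins, which would make the relogin below
    // a no-op.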
UserGroupInformation.setShouldRenewImmediatelyForTests(true);
String principal = "foo";
File keytab = new File(workDir, "foo.keytab");
kdc.createPrincipal(keytab, principal);
UserGroupInformation.loginUserFromKeytab(principal, keytab.getPath());
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
Assert.assertTrue("UGI should be configured to login from keytab",
ugi.isFromKeytab());
// Verify relogin from keytab.
User user = ugi.getSubject().getPrincipals(User.class).iterator().next();
final long firstLogin = user.getLastLogin();
ugi.reloginFromKeytab();
final long secondLogin = user.getLastLogin();
Assert.assertTrue("User should have been able to relogin from keytab",
secondLogin > firstLogin);
}
}
| 3,046 | 32.119565 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.*;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.util.Arrays;
import java.util.List;
import javax.naming.CommunicationException;
import javax.naming.NamingException;
import javax.naming.directory.SearchControls;
import javax.naming.directory.SearchResult;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@SuppressWarnings("unchecked")
public class TestLdapGroupsMapping extends TestLdapGroupsMappingBase {
@Before
public void setupMocks() throws NamingException {
SearchResult mockUserResult = mock(SearchResult.class);
when(mockUserNamingEnum.nextElement()).thenReturn(mockUserResult);
when(mockUserResult.getNameInNamespace()).thenReturn("CN=some_user,DC=test,DC=com");
}
@Test
public void testGetGroups() throws IOException, NamingException {
// The search functionality of the mock context is reused, so we will
// return the user NamingEnumeration first, and then the group
when(mockContext.search(anyString(), anyString(), any(Object[].class),
any(SearchControls.class)))
.thenReturn(mockUserNamingEnum, mockGroupNamingEnum);
doTestGetGroups(Arrays.asList(testGroups), 2);
}
@Test
public void testGetGroupsWithConnectionClosed() throws IOException, NamingException {
    // This case mocks a connection that has been closed/GC-ed: the first
    // search call throws CommunicationException, then after reconnecting
    // we return the user NamingEnumeration first, and then the group
when(mockContext.search(anyString(), anyString(), any(Object[].class),
any(SearchControls.class)))
.thenThrow(new CommunicationException("Connection is closed"))
.thenReturn(mockUserNamingEnum, mockGroupNamingEnum);
    // Although the connection went down, the mapping should still
    // retrieve the expected groups after reconnecting
doTestGetGroups(Arrays.asList(testGroups), 1 + 2); // 1 is the first failure call
}
@Test
public void testGetGroupsWithLdapDown() throws IOException, NamingException {
// This mocks the case where Ldap server is down, and always throws CommunicationException
when(mockContext.search(anyString(), anyString(), any(Object[].class),
any(SearchControls.class)))
.thenThrow(new CommunicationException("Connection is closed"));
// Ldap server is down, no groups should be retrieved
doTestGetGroups(Arrays.asList(new String[] {}),
1 + LdapGroupsMapping.RECONNECT_RETRY_COUNT); // 1 is the first normal call
}
private void doTestGetGroups(List<String> expectedGroups, int searchTimes) throws IOException, NamingException {
Configuration conf = new Configuration();
// Set this, so we don't throw an exception
conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://test");
mappingSpy.setConf(conf);
// Username is arbitrary, since the spy is mocked to respond the same,
// regardless of input
List<String> groups = mappingSpy.getGroups("some_user");
Assert.assertEquals(expectedGroups, groups);
// We should have searched for a user, and then two groups
verify(mockContext, times(searchTimes)).search(anyString(),
anyString(),
any(Object[].class),
any(SearchControls.class));
}
@Test
public void testExtractPassword() throws IOException {
File testDir = new File(System.getProperty("test.build.data",
"target/test-dir"));
testDir.mkdirs();
File secretFile = new File(testDir, "secret.txt");
Writer writer = new FileWriter(secretFile);
writer.write("hadoop");
writer.close();
LdapGroupsMapping mapping = new LdapGroupsMapping();
Assert.assertEquals("hadoop",
mapping.extractPassword(secretFile.getPath()));
}
@Test
public void testConfGetPassword() throws Exception {
File testDir = new File(System.getProperty("test.build.data",
"target/test-dir"));
Configuration conf = new Configuration();
final Path jksPath = new Path(testDir.toString(), "test.jks");
final String ourUrl =
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
File file = new File(testDir, "test.jks");
file.delete();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
CredentialProvider provider =
CredentialProviderFactory.getProviders(conf).get(0);
char[] bindpass = {'b', 'i', 'n', 'd', 'p', 'a', 's', 's'};
char[] storepass = {'s', 't', 'o', 'r', 'e', 'p', 'a', 's', 's'};
// ensure that we get nulls when the key isn't there
assertEquals(null, provider.getCredentialEntry(
LdapGroupsMapping.BIND_PASSWORD_KEY));
assertEquals(null, provider.getCredentialEntry
(LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY));
// create new aliases
try {
provider.createCredentialEntry(
LdapGroupsMapping.BIND_PASSWORD_KEY, bindpass);
provider.createCredentialEntry(
LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY, storepass);
provider.flush();
} catch (Exception e) {
e.printStackTrace();
throw e;
}
// make sure we get back the right key
assertArrayEquals(bindpass, provider.getCredentialEntry(
LdapGroupsMapping.BIND_PASSWORD_KEY).getCredential());
assertArrayEquals(storepass, provider.getCredentialEntry(
LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY).getCredential());
LdapGroupsMapping mapping = new LdapGroupsMapping();
Assert.assertEquals("bindpass",
mapping.getPassword(conf, LdapGroupsMapping.BIND_PASSWORD_KEY, ""));
Assert.assertEquals("storepass",
mapping.getPassword(conf, LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY,
""));
    // make sure that a password that doesn't exist returns an empty
    // string, as currently expected; the empty string is what triggers
    // the fallback call to extractPassword
Assert.assertEquals("", mapping.getPassword(conf,"invalid-alias", ""));
}
}
| 7,403 | 40.595506 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/NetUtilsTestResolver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.security.SecurityUtil.QualifiedHostResolver;
/**
 * Provides a dummy DNS search resolver with a configurable search path
 * and host mapping.
*/
public class NetUtilsTestResolver extends QualifiedHostResolver {
Map<String, InetAddress> resolvedHosts = new HashMap<String, InetAddress>();
List<String> hostSearches = new LinkedList<String>();
public static NetUtilsTestResolver install() {
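    // Swap the static resolver used by SecurityUtil for this test double.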
NetUtilsTestResolver resolver = new NetUtilsTestResolver();
resolver.setSearchDomains("a.b", "b", "c");
resolver.addResolvedHost("host.a.b.", "1.1.1.1");
resolver.addResolvedHost("b-host.b.", "2.2.2.2");
resolver.addResolvedHost("simple.", "3.3.3.3");
SecurityUtil.hostResolver = resolver;
return resolver;
}
public void addResolvedHost(String host, String ip) {
InetAddress addr;
try {
addr = InetAddress.getByName(ip);
addr = InetAddress.getByAddress(host, addr.getAddress());
} catch (UnknownHostException e) {
throw new IllegalArgumentException("not an ip:"+ip);
}
resolvedHosts.put(host, addr);
}
@Override
public InetAddress getInetAddressByName(String host) throws UnknownHostException {
hostSearches.add(host);
if (!resolvedHosts.containsKey(host)) {
throw new UnknownHostException(host);
}
return resolvedHosts.get(host);
}
@Override
public InetAddress getByExactName(String host) {
return super.getByExactName(host);
}
@Override
public InetAddress getByNameWithSearch(String host) {
return super.getByNameWithSearch(host);
}
public String[] getHostSearches() {
return hostSearches.toArray(new String[0]);
}
public void reset() {
hostSearches.clear();
}
}
| 2,750 | 30.988372 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.FakeTimer;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
public class TestGroupsCaching {
public static final Log LOG = LogFactory.getLog(TestGroupsCaching.class);
private static String[] myGroups = {"grp1", "grp2"};
private Configuration conf;
@Before
public void setup() {
FakeGroupMapping.resetRequestCount();
ExceptionalGroupMapping.resetRequestCount();
conf = new Configuration();
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
FakeGroupMapping.class,
ShellBasedUnixGroupsMapping.class);
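    // Inject FakeGroupMapping so Groups consults it instead of the
    // shell-based default.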
}
public static class FakeGroupMapping extends ShellBasedUnixGroupsMapping {
    // maps any user to the same fixed set of groups (allGroups)
private static Set<String> allGroups = new HashSet<String>();
private static Set<String> blackList = new HashSet<String>();
private static int requestCount = 0;
private static long getGroupsDelayMs = 0;
@Override
public List<String> getGroups(String user) throws IOException {
LOG.info("Getting groups for " + user);
requestCount++;
delayIfNecessary();
if (blackList.contains(user)) {
return new LinkedList<String>();
}
return new LinkedList<String>(allGroups);
}
private void delayIfNecessary() {
if (getGroupsDelayMs > 0) {
try {
Thread.sleep(getGroupsDelayMs);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
@Override
public void cacheGroupsRefresh() throws IOException {
LOG.info("Cache is being refreshed.");
clearBlackList();
return;
}
public static void clearBlackList() throws IOException {
LOG.info("Clearing the blacklist");
blackList.clear();
}
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
LOG.info("Adding " + groups + " to groups.");
allGroups.addAll(groups);
}
public static void addToBlackList(String user) throws IOException {
LOG.info("Adding " + user + " to the blacklist");
blackList.add(user);
}
public static int getRequestCount() {
return requestCount;
}
public static void resetRequestCount() {
requestCount = 0;
}
public static void setGetGroupsDelayMs(long delayMs) {
getGroupsDelayMs = delayMs;
}
}
public static class ExceptionalGroupMapping extends ShellBasedUnixGroupsMapping {
private static int requestCount = 0;
@Override
public List<String> getGroups(String user) throws IOException {
requestCount++;
throw new IOException("For test");
}
public static int getRequestCount() {
return requestCount;
}
public static void resetRequestCount() {
requestCount = 0;
}
}
@Test
public void testGroupsCaching() throws Exception {
// Disable negative cache.
conf.setLong(
CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 0);
Groups groups = new Groups(conf);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
FakeGroupMapping.clearBlackList();
FakeGroupMapping.addToBlackList("user1");
// regular entry
assertTrue(groups.getGroups("me").size() == 2);
// this must be cached. blacklisting should have no effect.
FakeGroupMapping.addToBlackList("me");
assertTrue(groups.getGroups("me").size() == 2);
// ask for a negative entry
try {
LOG.error("We are not supposed to get here." + groups.getGroups("user1").toString());
fail();
} catch (IOException ioe) {
if(!ioe.getMessage().startsWith("No groups found")) {
LOG.error("Got unexpected exception: " + ioe.getMessage());
fail();
}
}
// this shouldn't be cached. remove from the black list and retry.
FakeGroupMapping.clearBlackList();
assertTrue(groups.getGroups("user1").size() == 2);
}
public static class FakeunPrivilegedGroupMapping extends FakeGroupMapping {
private static boolean invoked = false;
@Override
public List<String> getGroups(String user) throws IOException {
invoked = true;
return super.getGroups(user);
}
}
/*
* Group lookup should not happen for static users
*/
@Test
public void testGroupLookupForStaticUsers() throws Exception {
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
FakeunPrivilegedGroupMapping.class, ShellBasedUnixGroupsMapping.class);
conf.set(CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES, "me=;user1=group1;user2=group1,group2");
Groups groups = new Groups(conf);
List<String> userGroups = groups.getGroups("me");
assertTrue("non-empty groups for static user", userGroups.isEmpty());
assertFalse("group lookup done for static user",
FakeunPrivilegedGroupMapping.invoked);
List<String> expected = new ArrayList<String>();
expected.add("group1");
FakeunPrivilegedGroupMapping.invoked = false;
userGroups = groups.getGroups("user1");
assertTrue("groups not correct", expected.equals(userGroups));
assertFalse("group lookup done for unprivileged user",
FakeunPrivilegedGroupMapping.invoked);
expected.add("group2");
FakeunPrivilegedGroupMapping.invoked = false;
userGroups = groups.getGroups("user2");
assertTrue("groups not correct", expected.equals(userGroups));
assertFalse("group lookup done for unprivileged user",
FakeunPrivilegedGroupMapping.invoked);
}
@Test
public void testNegativeGroupCaching() throws Exception {
final String user = "negcache";
final String failMessage = "Did not throw IOException: ";
conf.setLong(
CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 2);
FakeTimer timer = new FakeTimer();
Groups groups = new Groups(conf, timer);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
FakeGroupMapping.addToBlackList(user);
// In the first attempt, the user will be put in the negative cache.
try {
groups.getGroups(user);
fail(failMessage + "Failed to obtain groups from FakeGroupMapping.");
} catch (IOException e) {
// Expects to raise exception for the first time. But the user will be
// put into the negative cache
GenericTestUtils.assertExceptionContains("No groups found for user", e);
}
// The second time, the user is in the negative cache.
try {
groups.getGroups(user);
fail(failMessage + "The user is in the negative cache.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("No groups found for user", e);
}
// Brings back the backend user-group mapping service.
FakeGroupMapping.clearBlackList();
// It should still get groups from the negative cache.
try {
groups.getGroups(user);
fail(failMessage + "The user is still in the negative cache, even " +
"FakeGroupMapping has resumed.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("No groups found for user", e);
}
// Let the elements in the negative cache expire.
timer.advance(4 * 1000);
// The groups for the user is expired in the negative cache, a new copy of
// groups for the user is fetched.
assertEquals(Arrays.asList(myGroups), groups.getGroups(user));
}
@Test
public void testCachePreventsImplRequest() throws Exception {
// Disable negative cache.
conf.setLong(
CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 0);
Groups groups = new Groups(conf);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
FakeGroupMapping.clearBlackList();
assertEquals(0, FakeGroupMapping.getRequestCount());
// First call hits the wire
assertTrue(groups.getGroups("me").size() == 2);
assertEquals(1, FakeGroupMapping.getRequestCount());
    // Second call hits the cache
assertTrue(groups.getGroups("me").size() == 2);
assertEquals(1, FakeGroupMapping.getRequestCount());
}
@Test
public void testExceptionsFromImplNotCachedInNegativeCache() {
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
ExceptionalGroupMapping.class,
ShellBasedUnixGroupsMapping.class);
conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 10000);
Groups groups = new Groups(conf);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
assertEquals(0, ExceptionalGroupMapping.getRequestCount());
// First call should hit the wire
try {
groups.getGroups("anything");
fail("Should have thrown");
} catch (IOException e) {
// okay
}
assertEquals(1, ExceptionalGroupMapping.getRequestCount());
// Second call should hit the wire (no negative caching)
try {
groups.getGroups("anything");
fail("Should have thrown");
} catch (IOException e) {
// okay
}
assertEquals(2, ExceptionalGroupMapping.getRequestCount());
}
@Test
public void testOnlyOneRequestWhenNoEntryIsCached() throws Exception {
// Disable negative cache.
conf.setLong(
CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 0);
final Groups groups = new Groups(conf);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
FakeGroupMapping.clearBlackList();
FakeGroupMapping.setGetGroupsDelayMs(100);
ArrayList<Thread> threads = new ArrayList<Thread>();
for (int i = 0; i < 10; i++) {
threads.add(new Thread() {
public void run() {
try {
assertEquals(2, groups.getGroups("me").size());
} catch (IOException e) {
fail("Should not happen");
}
}
});
}
// We start a bunch of threads who all see no cached value
for (Thread t : threads) {
t.start();
}
for (Thread t : threads) {
t.join();
}
// But only one thread should have made the request
assertEquals(1, FakeGroupMapping.getRequestCount());
}
@Test
public void testOnlyOneRequestWhenExpiredEntryExists() throws Exception {
conf.setLong(
CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
FakeTimer timer = new FakeTimer();
final Groups groups = new Groups(conf, timer);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
FakeGroupMapping.clearBlackList();
FakeGroupMapping.setGetGroupsDelayMs(100);
// We make an initial request to populate the cache
groups.getGroups("me");
int startingRequestCount = FakeGroupMapping.getRequestCount();
// Then expire that entry
timer.advance(400 * 1000);
Thread.sleep(100);
ArrayList<Thread> threads = new ArrayList<Thread>();
for (int i = 0; i < 10; i++) {
threads.add(new Thread() {
public void run() {
try {
assertEquals(2, groups.getGroups("me").size());
} catch (IOException e) {
fail("Should not happen");
}
}
});
}
// We start a bunch of threads who all see the cached value
for (Thread t : threads) {
t.start();
}
for (Thread t : threads) {
t.join();
}
// Only one extra request is made
assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
}
@Test
public void testCacheEntriesExpire() throws Exception {
conf.setLong(
CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
FakeTimer timer = new FakeTimer();
final Groups groups = new Groups(conf, timer);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
FakeGroupMapping.clearBlackList();
// We make an entry
groups.getGroups("me");
int startingRequestCount = FakeGroupMapping.getRequestCount();
timer.advance(20 * 1000);
// Cache entry has expired so it results in a new fetch
groups.getGroups("me");
assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
}
@Test
public void testNegativeCacheClearedOnRefresh() throws Exception {
conf.setLong(
CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 100);
final Groups groups = new Groups(conf);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
FakeGroupMapping.clearBlackList();
FakeGroupMapping.addToBlackList("dne");
try {
groups.getGroups("dne");
fail("Should have failed to find this group");
} catch (IOException e) {
// pass
}
int startingRequestCount = FakeGroupMapping.getRequestCount();
groups.refresh();
FakeGroupMapping.addToBlackList("dne");
try {
List<String> g = groups.getGroups("dne");
fail("Should have failed to find this group");
} catch (IOException e) {
// pass
}
assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
}
@Test
public void testNegativeCacheEntriesExpire() throws Exception {
conf.setLong(
CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 2);
FakeTimer timer = new FakeTimer();
// Ensure that stale entries are removed from negative cache every 2 seconds
Groups groups = new Groups(conf, timer);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
// Add both these users to blacklist so that they
// can be added to negative cache
FakeGroupMapping.addToBlackList("user1");
FakeGroupMapping.addToBlackList("user2");
// Put user1 in negative cache.
try {
groups.getGroups("user1");
fail("Did not throw IOException : Failed to obtain groups" +
" from FakeGroupMapping.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("No groups found for user", e);
}
// Check if user1 exists in negative cache
assertTrue(groups.getNegativeCache().contains("user1"));
// Advance fake timer
timer.advance(1000);
// Put user2 in negative cache
try {
groups.getGroups("user2");
fail("Did not throw IOException : Failed to obtain groups" +
" from FakeGroupMapping.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("No groups found for user", e);
}
// Check if user2 exists in negative cache
assertTrue(groups.getNegativeCache().contains("user2"));
// Advance timer. Only user2 should be present in negative cache.
timer.advance(1100);
assertFalse(groups.getNegativeCache().contains("user1"));
assertTrue(groups.getNegativeCache().contains("user2"));
// Advance timer. Even user2 should not be present in negative cache.
timer.advance(1000);
assertFalse(groups.getNegativeCache().contains("user2"));
}
}
| 16,402 | 31.353057 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestNetgroupCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.security;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Test;
public class TestNetgroupCache {
private static final String USER1 = "user1";
private static final String USER2 = "user2";
private static final String USER3 = "user3";
private static final String GROUP1 = "group1";
private static final String GROUP2 = "group2";
@After
public void teardown() {
NetgroupCache.clear();
}
/**
* Cache two groups with a set of users.
* Test membership correctness.
*/
@Test
public void testMembership() {
List<String> users = new ArrayList<String>();
users.add(USER1);
users.add(USER2);
NetgroupCache.add(GROUP1, users);
users = new ArrayList<String>();
users.add(USER1);
users.add(USER3);
NetgroupCache.add(GROUP2, users);
verifyGroupMembership(USER1, 2, GROUP1);
verifyGroupMembership(USER1, 2, GROUP2);
verifyGroupMembership(USER2, 1, GROUP1);
verifyGroupMembership(USER3, 1, GROUP2);
}
/**
* Cache a group with a set of users.
* Test membership correctness.
* Clear cache, remove a user from the group and cache the group
* Test membership correctness.
*/
@Test
public void testUserRemoval() {
List<String> users = new ArrayList<String>();
users.add(USER1);
users.add(USER2);
NetgroupCache.add(GROUP1, users);
verifyGroupMembership(USER1, 1, GROUP1);
verifyGroupMembership(USER2, 1, GROUP1);
users.remove(USER2);
NetgroupCache.clear();
NetgroupCache.add(GROUP1, users);
verifyGroupMembership(USER1, 1, GROUP1);
verifyGroupMembership(USER2, 0, null);
}
/**
* Cache two groups with a set of users.
* Test membership correctness.
* Clear cache, cache only one group.
* Test membership correctness.
*/
@Test
public void testGroupRemoval() {
List<String> users = new ArrayList<String>();
users.add(USER1);
users.add(USER2);
NetgroupCache.add(GROUP1, users);
users = new ArrayList<String>();
users.add(USER1);
users.add(USER3);
NetgroupCache.add(GROUP2, users);
verifyGroupMembership(USER1, 2, GROUP1);
verifyGroupMembership(USER1, 2, GROUP2);
verifyGroupMembership(USER2, 1, GROUP1);
verifyGroupMembership(USER3, 1, GROUP2);
NetgroupCache.clear();
users = new ArrayList<String>();
users.add(USER1);
users.add(USER2);
NetgroupCache.add(GROUP1, users);
verifyGroupMembership(USER1, 1, GROUP1);
verifyGroupMembership(USER2, 1, GROUP1);
verifyGroupMembership(USER3, 0, null);
}
private void verifyGroupMembership(String user, int size, String group) {
List<String> groups = new ArrayList<String>();
NetgroupCache.getNetgroups(user, groups);
assertEquals(size, groups.size());
if (size > 0) {
boolean present = false;
for (String groupEntry:groups) {
if (groupEntry.equals(group)) {
present = true;
break;
}
}
assertTrue(present);
}
}
}
| 3,928 | 29.695313 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserFromEnv.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import org.junit.Assert;
import org.junit.Test;
public class TestUserFromEnv {
@Test
public void testUserFromEnvironment() throws IOException {
System.setProperty(UserGroupInformation.HADOOP_USER_NAME, "randomUser");
Assert.assertEquals("randomUser", UserGroupInformation.getLoginUser()
.getUserName());
}
}
| 1,197 | 35.30303 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Enumeration;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;
import org.junit.Before;
import org.junit.Test;
import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSecretManager;
import org.apache.hadoop.ipc.TestSaslRPC.TestTokenIdentifier;
import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSelector;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
/**
 * Tests that effective (proxy) users created via doAs are reported
 * correctly, both locally and on the remote side of an RPC call.
 */
public class TestDoAsEffectiveUser {
  final private static String REAL_USER_NAME = "realUser1@HADOOP.APACHE.ORG";
final private static String REAL_USER_SHORT_NAME = "realUser1";
final private static String PROXY_USER_NAME = "proxyUser";
final private static String GROUP1_NAME = "group1";
final private static String GROUP2_NAME = "group2";
final private static String[] GROUP_NAMES = new String[] { GROUP1_NAME,
GROUP2_NAME };
private static final String ADDRESS = "0.0.0.0";
private TestProtocol proxy;
private static final Configuration masterConf = new Configuration();
public static final Log LOG = LogFactory
.getLog(TestDoAsEffectiveUser.class);
static {
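    // auth_to_local rules that strip the @HADOOP.APACHE.ORG realm down to
    // short names.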
masterConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
"RULE:[2:$1@$0](.*@HADOOP.APACHE.ORG)s/@.*//" +
"RULE:[1:$1@$0](.*@HADOOP.APACHE.ORG)s/@.*//"
+ "DEFAULT");
}
@Before
public void setMasterConf() throws IOException {
UserGroupInformation.setConfiguration(masterConf);
refreshConf(masterConf);
}
private void configureSuperUserIPAddresses(Configuration conf,
String superUserShortName) throws IOException {
ArrayList<String> ipList = new ArrayList<String>();
Enumeration<NetworkInterface> netInterfaceList = NetworkInterface
.getNetworkInterfaces();
while (netInterfaceList.hasMoreElements()) {
NetworkInterface inf = netInterfaceList.nextElement();
Enumeration<InetAddress> addrList = inf.getInetAddresses();
while (addrList.hasMoreElements()) {
InetAddress addr = addrList.nextElement();
ipList.add(addr.getHostAddress());
}
}
StringBuilder builder = new StringBuilder();
for (String ip : ipList) {
builder.append(ip);
builder.append(',');
}
builder.append("127.0.1.1,");
builder.append(InetAddress.getLocalHost().getCanonicalHostName());
LOG.info("Local Ip addresses: "+builder.toString());
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(superUserShortName),
builder.toString());
}
/**
* Test method for
* {@link org.apache.hadoop.security.UserGroupInformation#createProxyUser(java.lang.String, org.apache.hadoop.security.UserGroupInformation)}
* .
*/
@Test
public void testCreateProxyUser() throws Exception {
// ensure that doAs works correctly
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUser(
PROXY_USER_NAME, realUserUgi);
UserGroupInformation curUGI = proxyUserUgi
.doAs(new PrivilegedExceptionAction<UserGroupInformation>() {
@Override
public UserGroupInformation run() throws IOException {
return UserGroupInformation.getCurrentUser();
}
});
Assert.assertEquals(
PROXY_USER_NAME + " (auth:PROXY) via " + REAL_USER_NAME + " (auth:SIMPLE)",
curUGI.toString());
}
@TokenInfo(TestTokenSelector.class)
public interface TestProtocol extends VersionedProtocol {
public static final long versionID = 1L;
String aMethod() throws IOException;
String getServerRemoteUser() throws IOException;
}
public class TestImpl implements TestProtocol {
@Override
public String aMethod() throws IOException {
return UserGroupInformation.getCurrentUser().toString();
}
@Override
public String getServerRemoteUser() throws IOException {
return Server.getRemoteUser().toString();
}
@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return TestProtocol.versionID;
}
@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return new ProtocolSignature(TestProtocol.versionID, null);
}
}
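  // TestImpl simply echoes the caller's identity back over RPC, so
  // checkRemoteUgi() below can compare what the server saw against the UGI
  // that made the call.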
private void checkRemoteUgi(final Server server,
final UserGroupInformation ugi, final Configuration conf)
throws Exception {
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException {
proxy = RPC.getProxy(
TestProtocol.class, TestProtocol.versionID,
NetUtils.getConnectAddress(server), conf);
Assert.assertEquals(ugi.toString(), proxy.aMethod());
Assert.assertEquals(ugi.toString(), proxy.getServerRemoteUser());
return null;
}
});
}
@Test(timeout=4000)
public void testRealUserSetup() throws IOException {
final Configuration conf = new Configuration();
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true).build();
refreshConf(conf);
try {
server.start();
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
checkRemoteUgi(server, realUserUgi, conf);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
checkRemoteUgi(server, proxyUserUgi, conf);
} catch (Exception e) {
e.printStackTrace();
Assert.fail();
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@Test(timeout=4000)
public void testRealUserAuthorizationSuccess() throws IOException {
final Configuration conf = new Configuration();
configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
"group1");
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
checkRemoteUgi(server, realUserUgi, conf);
UserGroupInformation proxyUserUgi = UserGroupInformation
.createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
checkRemoteUgi(server, proxyUserUgi, conf);
} catch (Exception e) {
e.printStackTrace();
Assert.fail();
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
  /*
   * Tests that impersonation fails when the client does not connect from one
   * of the superuser's authorized IP addresses.
   */
@Test
public void testRealUserIPAuthorizationFailure() throws IOException {
final Configuration conf = new Configuration();
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_SHORT_NAME),
"20.20.20.20"); //Authorized IP address
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
"group1");
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation
.createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
String retVal = proxyUserUgi
.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws IOException {
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
}
});
Assert.fail("The RPC must have failed " + retVal);
} catch (Exception e) {
e.printStackTrace();
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@Test
public void testRealUserIPNotSpecified() throws IOException {
final Configuration conf = new Configuration();
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation
.createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
String retVal = proxyUserUgi
.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws IOException {
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
}
});
Assert.fail("The RPC must have failed " + retVal);
} catch (Exception e) {
e.printStackTrace();
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@Test
public void testRealUserGroupNotSpecified() throws IOException {
final Configuration conf = new Configuration();
configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
try {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation
.createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
String retVal = proxyUserUgi
.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws IOException {
          proxy = RPC.getProxy(TestProtocol.class,
              TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
}
});
Assert.fail("The RPC must have failed " + retVal);
} catch (Exception e) {
e.printStackTrace();
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
@Test
public void testRealUserGroupAuthorizationFailure() throws IOException {
final Configuration conf = new Configuration();
configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
"group3");
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation
.createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
String retVal = proxyUserUgi
.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws IOException {
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
}
});
Assert.fail("The RPC must have failed " + retVal);
} catch (Exception e) {
e.printStackTrace();
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
  /*
   * Tests the scenario when token authorization is used.
   * The server sees only the owner of the token as the
   * user.
   */
@Test
public void testProxyWithToken() throws Exception {
final Configuration conf = new Configuration(masterConf);
TestTokenSecretManager sm = new TestTokenSecretManager();
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
UserGroupInformation.setConfiguration(conf);
final Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
server.start();
final UserGroupInformation current = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
.getUserName()), new Text("SomeSuperUser"));
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
sm);
SecurityUtil.setTokenService(token, addr);
UserGroupInformation proxyUserUgi = UserGroupInformation
.createProxyUserForTesting(PROXY_USER_NAME, current, GROUP_NAMES);
proxyUserUgi.addToken(token);
refreshConf(conf);
String retVal = proxyUserUgi.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws Exception {
try {
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
} catch (Exception e) {
e.printStackTrace();
throw e;
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
});
//The user returned by server must be the one in the token.
Assert.assertEquals(REAL_USER_NAME + " (auth:TOKEN) via SomeSuperUser (auth:SIMPLE)", retVal);
}
/*
* The user gets the token via a superuser. Server should authenticate
* this user.
*/
@Test
public void testTokenBySuperUser() throws Exception {
TestTokenSecretManager sm = new TestTokenSecretManager();
final Configuration newConf = new Configuration(masterConf);
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf);
UserGroupInformation.setConfiguration(newConf);
final Server server = new RPC.Builder(newConf)
.setProtocol(TestProtocol.class).setInstance(new TestImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(sm).build();
server.start();
final UserGroupInformation current = UserGroupInformation
.createUserForTesting(REAL_USER_NAME, GROUP_NAMES);
refreshConf(newConf);
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
.getUserName()), new Text("SomeSuperUser"));
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
sm);
SecurityUtil.setTokenService(token, addr);
current.addToken(token);
String retVal = current.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws Exception {
try {
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, newConf);
String ret = proxy.aMethod();
return ret;
} catch (Exception e) {
e.printStackTrace();
throw e;
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
});
String expected = REAL_USER_NAME + " (auth:TOKEN) via SomeSuperUser (auth:SIMPLE)";
Assert.assertEquals(retVal + "!=" + expected, expected, retVal);
}
private void refreshConf(Configuration conf) throws IOException {
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
}
| 19,140 | 34.64432 | 143 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithExternalKdc.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import static org.apache.hadoop.security.SecurityUtilTestHelper.isExternalKdcRunning;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
/**
* Tests kerberos keytab login using a user-specified external KDC
*
* To run, users must specify the following system properties:
* externalKdc=true
* java.security.krb5.conf
* user.principal
* user.keytab
*/
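// One possible invocation (all paths and the principal below are
// hypothetical; adjust them to the external KDC being tested against):
//
//   mvn test -Dtest=TestUGIWithExternalKdc -DexternalKdc=true \
//     -Djava.security.krb5.conf=/etc/krb5.conf \
//     -Duser.principal=user@EXAMPLE.COM \
//     -Duser.keytab=/path/to/user.keytab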
public class TestUGIWithExternalKdc {
@Before
public void testExternalKdcRunning() {
Assume.assumeTrue(isExternalKdcRunning());
}
@Test
public void testLogin() throws IOException {
String userPrincipal = System.getProperty("user.principal");
String userKeyTab = System.getProperty("user.keytab");
Assert.assertNotNull("User principal was not specified", userPrincipal);
Assert.assertNotNull("User keytab was not specified", userKeyTab);
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi = UserGroupInformation
.loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab);
Assert.assertEquals(AuthenticationMethod.KERBEROS,
ugi.getAuthenticationMethod());
try {
UserGroupInformation
.loginUserFromKeytabAndReturnUGI("[email protected]", userKeyTab);
Assert.fail("Login should have failed");
} catch (Exception ex) {
ex.printStackTrace();
}
}
}
| 2,560 | 33.146667 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCredentials.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.security.Key;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.ArrayList;
import java.util.Map;
import java.util.Collection;
import javax.crypto.KeyGenerator;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestCredentials {
private static final String DEFAULT_HMAC_ALGORITHM = "HmacSHA1";
private static final File tmpDir =
new File(System.getProperty("test.build.data", "/tmp"), "mapred");
@Before
public void setUp() {
tmpDir.mkdir();
}
@After
public void tearDown() {
tmpDir.delete();
}
@SuppressWarnings("unchecked")
@Test
public <T extends TokenIdentifier> void testReadWriteStorage()
throws IOException, NoSuchAlgorithmException{
// create tokenStorage Object
Credentials ts = new Credentials();
    Token<T> token1 = new Token<T>();
    Token<T> token2 = new Token<T>();
Text service1 = new Text("service1");
Text service2 = new Text("service2");
Collection<Text> services = new ArrayList<Text>();
services.add(service1);
services.add(service2);
token1.setService(service1);
token2.setService(service2);
ts.addToken(new Text("sometoken1"), token1);
ts.addToken(new Text("sometoken2"), token2);
// create keys and put it in
final KeyGenerator kg = KeyGenerator.getInstance(DEFAULT_HMAC_ALGORITHM);
String alias = "alias";
Map<Text, byte[]> m = new HashMap<Text, byte[]>(10);
for(int i=0; i<10; i++) {
Key key = kg.generateKey();
m.put(new Text(alias+i), key.getEncoded());
ts.addSecretKey(new Text(alias+i), key.getEncoded());
}
// create file to store
File tmpFileName = new File(tmpDir, "tokenStorageTest");
DataOutputStream dos =
new DataOutputStream(new FileOutputStream(tmpFileName));
ts.write(dos);
dos.close();
// open and read it back
DataInputStream dis =
new DataInputStream(new FileInputStream(tmpFileName));
ts = new Credentials();
ts.readFields(dis);
dis.close();
// get the tokens and compare the services
Collection<Token<? extends TokenIdentifier>> list = ts.getAllTokens();
assertEquals("getAllTokens should return collection of size 2",
list.size(), 2);
boolean foundFirst = false;
boolean foundSecond = false;
for (Token<? extends TokenIdentifier> token : list) {
if (token.getService().equals(service1)) {
foundFirst = true;
}
if (token.getService().equals(service2)) {
foundSecond = true;
}
}
assertTrue("Tokens for services service1 and service2 must be present",
foundFirst && foundSecond);
// compare secret keys
int mapLen = m.size();
assertEquals("wrong number of keys in the Storage",
mapLen, ts.numberOfSecretKeys());
for(Text a : m.keySet()) {
byte [] kTS = ts.getSecretKey(a);
byte [] kLocal = m.get(a);
assertTrue("keys don't match for " + a,
WritableComparator.compareBytes(kTS, 0, kTS.length, kLocal,
0, kLocal.length)==0);
}
tmpFileName.delete();
}
  static Text[] secret = {
    new Text("secret1"),
    new Text("secret2"),
    new Text("secret3"),
    new Text("secret4")
  };
  static Text[] service = {
    new Text("service1"),
    new Text("service2"),
    new Text("service3"),
    new Text("service4")
  };
  static Token<?>[] token = {
    new Token<TokenIdentifier>(),
    new Token<TokenIdentifier>(),
    new Token<TokenIdentifier>(),
    new Token<TokenIdentifier>()
  };
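  // The two tests below run the same fixture through both combination
  // methods: addAll() lets incoming credentials overwrite duplicate keys,
  // while mergeAll() keeps the existing entries and only adds new ones.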
@Test
public void addAll() {
Credentials creds = new Credentials();
creds.addToken(service[0], token[0]);
creds.addToken(service[1], token[1]);
creds.addSecretKey(secret[0], secret[0].getBytes());
creds.addSecretKey(secret[1], secret[1].getBytes());
Credentials credsToAdd = new Credentials();
// one duplicate with different value, one new
credsToAdd.addToken(service[0], token[3]);
credsToAdd.addToken(service[2], token[2]);
credsToAdd.addSecretKey(secret[0], secret[3].getBytes());
credsToAdd.addSecretKey(secret[2], secret[2].getBytes());
creds.addAll(credsToAdd);
assertEquals(3, creds.numberOfTokens());
assertEquals(3, creds.numberOfSecretKeys());
// existing token & secret should be overwritten
assertEquals(token[3], creds.getToken(service[0]));
assertEquals(secret[3], new Text(creds.getSecretKey(secret[0])));
// non-duplicate token & secret should be present
assertEquals(token[1], creds.getToken(service[1]));
assertEquals(secret[1], new Text(creds.getSecretKey(secret[1])));
// new token & secret should be added
assertEquals(token[2], creds.getToken(service[2]));
assertEquals(secret[2], new Text(creds.getSecretKey(secret[2])));
}
@Test
public void mergeAll() {
Credentials creds = new Credentials();
creds.addToken(service[0], token[0]);
creds.addToken(service[1], token[1]);
creds.addSecretKey(secret[0], secret[0].getBytes());
creds.addSecretKey(secret[1], secret[1].getBytes());
Credentials credsToAdd = new Credentials();
// one duplicate with different value, one new
credsToAdd.addToken(service[0], token[3]);
credsToAdd.addToken(service[2], token[2]);
credsToAdd.addSecretKey(secret[0], secret[3].getBytes());
credsToAdd.addSecretKey(secret[2], secret[2].getBytes());
creds.mergeAll(credsToAdd);
assertEquals(3, creds.numberOfTokens());
assertEquals(3, creds.numberOfSecretKeys());
// existing token & secret should not be overwritten
assertEquals(token[0], creds.getToken(service[0]));
assertEquals(secret[0], new Text(creds.getSecretKey(secret[0])));
// non-duplicate token & secret should be present
assertEquals(token[1], creds.getToken(service[1]));
assertEquals(secret[1], new Text(creds.getSecretKey(secret[1])));
// new token & secret should be added
assertEquals(token[2], creds.getToken(service[2]));
assertEquals(secret[2], new Text(creds.getSecretKey(secret[2])));
}
@Test
public void testAddTokensToUGI() {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("someone");
Credentials creds = new Credentials();
for (int i=0; i < service.length; i++) {
creds.addToken(service[i], token[i]);
}
ugi.addCredentials(creds);
creds = ugi.getCredentials();
for (int i=0; i < service.length; i++) {
assertSame(token[i], creds.getToken(service[i]));
}
assertEquals(service.length, creds.numberOfTokens());
}
}
| 7,957 | 33.450216 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ManualTestKeytabLogins.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import org.apache.hadoop.security.UserGroupInformation;
import static org.junit.Assert.assertTrue;
/**
* Regression test for HADOOP-6947 which can be run manually in
* a kerberos environment.
*
* To run this test, set up two keytabs, each with a different principal.
* Then run something like:
* <code>
* HADOOP_CLASSPATH=build/test/classes bin/hadoop \
* org.apache.hadoop.security.ManualTestKeytabLogins \
* usera/test@REALM /path/to/usera-keytab \
* userb/test@REALM /path/to/userb-keytab
* </code>
*/
public class ManualTestKeytabLogins {
  public static void main(String[] args) throws Exception {
if (args.length != 4) {
System.err.println(
"usage: ManualTestKeytabLogins <principal 1> <keytab 1> <principal 2> <keytab 2>");
System.exit(1);
}
UserGroupInformation ugi1 =
UserGroupInformation.loginUserFromKeytabAndReturnUGI(
args[0], args[1]);
System.out.println("UGI 1 = " + ugi1);
assertTrue(ugi1.getUserName().equals(args[0]));
UserGroupInformation ugi2 =
UserGroupInformation.loginUserFromKeytabAndReturnUGI(
args[2], args[3]);
System.out.println("UGI 2 = " + ugi2);
assertTrue(ugi2.getUserName().equals(args[2]));
}
}
| 2,092 | 35.086207 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingBase.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import javax.naming.NamingEnumeration;
import javax.naming.NamingException;
import javax.naming.directory.Attribute;
import javax.naming.directory.Attributes;
import javax.naming.directory.BasicAttribute;
import javax.naming.directory.BasicAttributes;
import javax.naming.directory.DirContext;
import javax.naming.directory.SearchResult;
import org.junit.Before;
public class TestLdapGroupsMappingBase {
protected DirContext mockContext;
protected LdapGroupsMapping mappingSpy = spy(new LdapGroupsMapping());
protected NamingEnumeration mockUserNamingEnum =
mock(NamingEnumeration.class);
protected NamingEnumeration mockGroupNamingEnum =
mock(NamingEnumeration.class);
protected String[] testGroups = new String[] {"group1", "group2"};
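  // The mocks below simulate a directory in which the user search yields a
  // single entry and the group search yields the two names in testGroups;
  // subclasses wire them into concrete search expectations.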
@Before
public void setupMocksBase() throws NamingException {
mockContext = mock(DirContext.class);
doReturn(mockContext).when(mappingSpy).getDirContext();
// We only ever call hasMoreElements once for the user NamingEnum, so
// we can just have one return value
when(mockUserNamingEnum.hasMoreElements()).thenReturn(true);
SearchResult mockGroupResult = mock(SearchResult.class);
    // We're going to have to define the loop here. We want two iterations,
    // one for each of the two groups
when(mockGroupNamingEnum.hasMoreElements()).thenReturn(true, true, false);
when(mockGroupNamingEnum.nextElement()).thenReturn(mockGroupResult);
// Define the attribute for the name of the first group
Attribute group1Attr = new BasicAttribute("cn");
group1Attr.add(testGroups[0]);
Attributes group1Attrs = new BasicAttributes();
group1Attrs.put(group1Attr);
// Define the attribute for the name of the second group
Attribute group2Attr = new BasicAttribute("cn");
group2Attr.add(testGroups[1]);
Attributes group2Attrs = new BasicAttributes();
group2Attrs.put(group2Attr);
// This search result gets reused, so return group1, then group2
when(mockGroupResult.getAttributes()).thenReturn(group1Attrs, group2Attrs);
}
}
| 3,066 | 38.320513 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.security.ShellBasedIdMapping.PassThroughMap;
import org.apache.hadoop.security.ShellBasedIdMapping.StaticMapping;
import org.junit.Test;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
public class TestShellBasedIdMapping {
private static final Map<Integer, Integer> EMPTY_PASS_THROUGH_MAP =
new PassThroughMap<Integer>();
private void createStaticMapFile(final File smapFile, final String smapStr)
throws IOException {
OutputStream out = new FileOutputStream(smapFile);
out.write(smapStr.getBytes());
out.close();
}
@Test
public void testStaticMapParsing() throws IOException {
File tempStaticMapFile = File.createTempFile("nfs-", ".map");
final String staticMapFileContents =
"uid 10 100\n" +
"gid 10 200\n" +
"uid 11 201 # comment at the end of a line\n" +
"uid 12 301\n" +
"# Comment at the beginning of a line\n" +
" # Comment that starts late in the line\n" +
"uid 10000 10001# line without whitespace before comment\n" +
"uid 13 302\n" +
"gid\t11\t201\n" + // Tabs instead of spaces.
"\n" + // Entirely empty line.
"gid 12 202\n" +
"uid 4294967294 123\n" +
"gid 4294967295 321";
createStaticMapFile(tempStaticMapFile, staticMapFileContents);
StaticMapping parsedMap =
ShellBasedIdMapping.parseStaticMap(tempStaticMapFile);
assertEquals(10, (int)parsedMap.uidMapping.get(100));
assertEquals(11, (int)parsedMap.uidMapping.get(201));
assertEquals(12, (int)parsedMap.uidMapping.get(301));
assertEquals(13, (int)parsedMap.uidMapping.get(302));
assertEquals(10, (int)parsedMap.gidMapping.get(200));
assertEquals(11, (int)parsedMap.gidMapping.get(201));
assertEquals(12, (int)parsedMap.gidMapping.get(202));
assertEquals(10000, (int)parsedMap.uidMapping.get(10001));
    // Ensure pass-through of unmapped IDs works.
    assertEquals(1000, (int)parsedMap.uidMapping.get(1000));
    // IDs beyond Integer.MAX_VALUE wrap to their signed int equivalents.
    assertEquals(-2, (int)parsedMap.uidMapping.get(123));
    assertEquals(-1, (int)parsedMap.gidMapping.get(321));
}
@Test
public void testStaticMapping() throws IOException {
assumeTrue(!Shell.WINDOWS);
Map<Integer, Integer> uidStaticMap = new PassThroughMap<Integer>();
Map<Integer, Integer> gidStaticMap = new PassThroughMap<Integer>();
uidStaticMap.put(11501, 10);
gidStaticMap.put(497, 200);
// Maps for id to name map
BiMap<Integer, String> uMap = HashBiMap.create();
BiMap<Integer, String> gMap = HashBiMap.create();
String GET_ALL_USERS_CMD =
"echo \"atm:x:1000:1000:Aaron T. Myers,,,:/home/atm:/bin/bash\n"
+ "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\""
+ " | cut -d: -f1,3";
String GET_ALL_GROUPS_CMD = "echo \"hdfs:*:11501:hrt_hdfs\n"
+ "mapred:x:497\n"
+ "mapred2:x:498\""
+ " | cut -d: -f1,3";
ShellBasedIdMapping.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
uidStaticMap);
ShellBasedIdMapping.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
gidStaticMap);
assertEquals("hdfs", uMap.get(10));
assertEquals(10, (int)uMap.inverse().get("hdfs"));
assertEquals("atm", uMap.get(1000));
assertEquals(1000, (int)uMap.inverse().get("atm"));
assertEquals("hdfs", gMap.get(11501));
assertEquals(11501, (int)gMap.inverse().get("hdfs"));
assertEquals("mapred", gMap.get(200));
assertEquals(200, (int)gMap.inverse().get("mapred"));
assertEquals("mapred2", gMap.get(498));
assertEquals(498, (int)gMap.inverse().get("mapred2"));
}
// Test staticMap refreshing
@Test
public void testStaticMapUpdate() throws IOException {
assumeTrue(!Shell.WINDOWS);
File tempStaticMapFile = File.createTempFile("nfs-", ".map");
tempStaticMapFile.delete();
Configuration conf = new Configuration();
conf.setLong(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY, 1000);
conf.set(IdMappingConstant.STATIC_ID_MAPPING_FILE_KEY,
tempStaticMapFile.getPath());
ShellBasedIdMapping refIdMapping =
new ShellBasedIdMapping(conf, true);
ShellBasedIdMapping incrIdMapping = new ShellBasedIdMapping(conf);
BiMap<Integer, String> uidNameMap = refIdMapping.getUidNameMap();
BiMap<Integer, String> gidNameMap = refIdMapping.getGidNameMap();
// Force empty map, to see effect of incremental map update of calling
// getUid()
incrIdMapping.clearNameMaps();
uidNameMap = refIdMapping.getUidNameMap();
{
      Map.Entry<Integer, String> me = uidNameMap.entrySet().iterator().next();
Integer id = me.getKey();
String name = me.getValue();
// The static map is empty, so the id found for "name" would be
// the same as "id"
Integer nid = incrIdMapping.getUid(name);
assertEquals(id, nid);
// Clear map and update staticMap file
incrIdMapping.clearNameMaps();
Integer rid = id + 10000;
String smapStr = "uid " + rid + " " + id;
createStaticMapFile(tempStaticMapFile, smapStr);
// Now the id found for "name" should be the id specified by
// the staticMap
nid = incrIdMapping.getUid(name);
assertEquals(rid, nid);
}
// Force empty map, to see effect of incremental map update of calling
// getGid()
incrIdMapping.clearNameMaps();
gidNameMap = refIdMapping.getGidNameMap();
{
      Map.Entry<Integer, String> me = gidNameMap.entrySet().iterator().next();
Integer id = me.getKey();
String name = me.getValue();
// The static map is empty, so the id found for "name" would be
// the same as "id"
Integer nid = incrIdMapping.getGid(name);
assertEquals(id, nid);
// Clear map and update staticMap file
incrIdMapping.clearNameMaps();
Integer rid = id + 10000;
String smapStr = "gid " + rid + " " + id;
// Sleep a bit to avoid that two changes have the same modification time
try {Thread.sleep(1000);} catch (InterruptedException e) {}
createStaticMapFile(tempStaticMapFile, smapStr);
// Now the id found for "name" should be the id specified by
// the staticMap
nid = incrIdMapping.getGid(name);
assertEquals(rid, nid);
}
}
@Test
public void testDuplicates() throws IOException {
assumeTrue(!Shell.WINDOWS);
String GET_ALL_USERS_CMD = "echo \"root:x:0:0:root:/root:/bin/bash\n"
+ "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+ "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+ "hdfs1:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+ "hdfs2:x:11502:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+ "bin:x:2:2:bin:/bin:/bin/sh\n"
+ "bin:x:1:1:bin:/bin:/sbin/nologin\n"
+ "daemon:x:1:1:daemon:/usr/sbin:/bin/sh\n"
+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""
+ " | cut -d: -f1,3";
String GET_ALL_GROUPS_CMD = "echo \"hdfs:*:11501:hrt_hdfs\n"
+ "mapred:x:497\n"
+ "mapred2:x:497\n"
+ "mapred:x:498\n"
+ "mapred3:x:498\""
+ " | cut -d: -f1,3";
// Maps for id to name map
BiMap<Integer, String> uMap = HashBiMap.create();
BiMap<Integer, String> gMap = HashBiMap.create();
ShellBasedIdMapping.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
EMPTY_PASS_THROUGH_MAP);
assertEquals(5, uMap.size());
assertEquals("root", uMap.get(0));
assertEquals("hdfs", uMap.get(11501));
assertEquals("hdfs2",uMap.get(11502));
assertEquals("bin", uMap.get(2));
assertEquals("daemon", uMap.get(1));
ShellBasedIdMapping.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
EMPTY_PASS_THROUGH_MAP);
    assertEquals(3, gMap.size());
    assertEquals("hdfs", gMap.get(11501));
    assertEquals("mapred", gMap.get(497));
    assertEquals("mapred3", gMap.get(498));
}
@Test
public void testIdOutOfIntegerRange() throws IOException {
assumeTrue(!Shell.WINDOWS);
String GET_ALL_USERS_CMD = "echo \""
+ "nfsnobody:x:4294967294:4294967294:Anonymous NFS User:/var/lib/nfs:/sbin/nologin\n"
+ "nfsnobody1:x:4294967295:4294967295:Anonymous NFS User:/var/lib/nfs1:/sbin/nologin\n"
+ "maxint:x:2147483647:2147483647:Grid Distributed File System:/home/maxint:/bin/bash\n"
+ "minint:x:2147483648:2147483648:Grid Distributed File System:/home/minint:/bin/bash\n"
+ "archivebackup:*:1031:4294967294:Archive Backup:/home/users/archivebackup:/bin/sh\n"
+ "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""
+ " | cut -d: -f1,3";
String GET_ALL_GROUPS_CMD = "echo \""
+ "hdfs:*:11501:hrt_hdfs\n"
+ "rpcuser:*:29:\n"
+ "nfsnobody:*:4294967294:\n"
+ "nfsnobody1:*:4294967295:\n"
+ "maxint:*:2147483647:\n"
+ "minint:*:2147483648:\n"
+ "mapred3:x:498\""
+ " | cut -d: -f1,3";
// Maps for id to name map
BiMap<Integer, String> uMap = HashBiMap.create();
BiMap<Integer, String> gMap = HashBiMap.create();
ShellBasedIdMapping.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
EMPTY_PASS_THROUGH_MAP);
    assertEquals(7, uMap.size());
    assertEquals("nfsnobody", uMap.get(-2));
    assertEquals("nfsnobody1", uMap.get(-1));
    assertEquals("maxint", uMap.get(2147483647));
    assertEquals("minint", uMap.get(-2147483648));
    assertEquals("archivebackup", uMap.get(1031));
    assertEquals("hdfs", uMap.get(11501));
    assertEquals("daemon", uMap.get(2));
ShellBasedIdMapping.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
EMPTY_PASS_THROUGH_MAP);
    assertEquals(7, gMap.size());
    assertEquals("hdfs", gMap.get(11501));
    assertEquals("rpcuser", gMap.get(29));
    assertEquals("nfsnobody", gMap.get(-2));
    assertEquals("nfsnobody1", gMap.get(-1));
    assertEquals("maxint", gMap.get(2147483647));
    assertEquals("minint", gMap.get(-2147483648));
    assertEquals("mapred3", gMap.get(498));
}
@Test
public void testUserUpdateSetting() throws IOException {
ShellBasedIdMapping iug = new ShellBasedIdMapping(new Configuration());
    assertEquals(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_DEFAULT,
        iug.getTimeout());
Configuration conf = new Configuration();
conf.setLong(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY, 0);
iug = new ShellBasedIdMapping(conf);
    assertEquals(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_MIN,
        iug.getTimeout());
conf.setLong(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY,
IdMappingConstant.USERGROUPID_UPDATE_MILLIS_DEFAULT * 2);
iug = new ShellBasedIdMapping(conf);
    assertEquals(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_DEFAULT * 2,
        iug.getTimeout());
}
@Test
public void testUpdateMapIncr() throws IOException {
Configuration conf = new Configuration();
conf.setLong(IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY, 600000);
ShellBasedIdMapping refIdMapping =
new ShellBasedIdMapping(conf, true);
ShellBasedIdMapping incrIdMapping = new ShellBasedIdMapping(conf);
    // A command such as "getent passwd <userName>" will return an empty
    // string if <userName> is numerical; remove such entries from the map
    // for testing purposes.
BiMap<Integer, String> uidNameMap = refIdMapping.getUidNameMap();
BiMap<Integer, String> gidNameMap = refIdMapping.getGidNameMap();
// Force empty map, to see effect of incremental map update of calling
// getUserName()
incrIdMapping.clearNameMaps();
uidNameMap = refIdMapping.getUidNameMap();
    for (Map.Entry<Integer, String> me : uidNameMap.entrySet()) {
Integer id = me.getKey();
String name = me.getValue();
String tname = incrIdMapping.getUserName(id, null);
assertEquals(name, tname);
}
assertEquals(uidNameMap.size(), incrIdMapping.getUidNameMap().size());
// Force empty map, to see effect of incremental map update of calling
// getUid()
incrIdMapping.clearNameMaps();
    for (Map.Entry<Integer, String> me : uidNameMap.entrySet()) {
Integer id = me.getKey();
String name = me.getValue();
Integer tid = incrIdMapping.getUid(name);
assertEquals(id, tid);
}
assertEquals(uidNameMap.size(), incrIdMapping.getUidNameMap().size());
// Force empty map, to see effect of incremental map update of calling
// getGroupName()
incrIdMapping.clearNameMaps();
gidNameMap = refIdMapping.getGidNameMap();
    for (Map.Entry<Integer, String> me : gidNameMap.entrySet()) {
Integer id = me.getKey();
String name = me.getValue();
String tname = incrIdMapping.getGroupName(id, null);
assertEquals(name, tname);
}
assertEquals(gidNameMap.size(), incrIdMapping.getGidNameMap().size());
// Force empty map, to see effect of incremental map update of calling
// getGid()
incrIdMapping.clearNameMaps();
gidNameMap = refIdMapping.getGidNameMap();
    for (Map.Entry<Integer, String> me : gidNameMap.entrySet()) {
Integer id = me.getKey();
String name = me.getValue();
Integer tid = incrIdMapping.getGid(name);
assertEquals(id, tid);
}
assertEquals(gidNameMap.size(), incrIdMapping.getGidNameMap().size());
}
}
| 14,762 | 39.00813 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupFallback.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import static org.junit.Assert.assertTrue;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.Test;
public class TestGroupFallback {
public static final Log LOG = LogFactory.getLog(TestGroupFallback.class);
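  // Each test below performs the same lookup through a different
  // group-mapping provider (shell, netgroup shell, and the two
  // JNI-with-fallback variants) and expects at least one group for the
  // current user.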
@Test
public void testGroupShell() throws Exception {
Logger.getRootLogger().setLevel(Level.DEBUG);
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
"org.apache.hadoop.security.ShellBasedUnixGroupsMapping");
Groups groups = new Groups(conf);
String username = System.getProperty("user.name");
List<String> groupList = groups.getGroups(username);
LOG.info(username + " has GROUPS: " + groupList.toString());
assertTrue(groupList.size() > 0);
}
@Test
public void testNetgroupShell() throws Exception {
Logger.getRootLogger().setLevel(Level.DEBUG);
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
"org.apache.hadoop.security.ShellBasedUnixGroupsNetgroupMapping");
Groups groups = new Groups(conf);
String username = System.getProperty("user.name");
List<String> groupList = groups.getGroups(username);
LOG.info(username + " has GROUPS: " + groupList.toString());
assertTrue(groupList.size() > 0);
}
@Test
public void testGroupWithFallback() throws Exception {
LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
"test the normal path and 'mvn -DTestGroupFallback clear test' will" +
" test the fall back functionality");
Logger.getRootLogger().setLevel(Level.DEBUG);
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
"org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback");
Groups groups = new Groups(conf);
String username = System.getProperty("user.name");
List<String> groupList = groups.getGroups(username);
LOG.info(username + " has GROUPS: " + groupList.toString());
assertTrue(groupList.size() > 0);
}
@Test
public void testNetgroupWithFallback() throws Exception {
LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
"test the normal path and 'mvn -DTestGroupFallback clear test' will" +
" test the fall back functionality");
Logger.getRootLogger().setLevel(Level.DEBUG);
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
"org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMappingWithFallback");
Groups groups = new Groups(conf);
String username = System.getProperty("user.name");
List<String> groupList = groups.getGroups(username);
LOG.info(username + " has GROUPS: " + groupList.toString());
assertTrue(groupList.size() > 0);
}
}
| 3,954 | 36.311321 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/SecurityUtilTestHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.security;
/** helper utils for tests */
public class SecurityUtilTestHelper {
/**
* Allow tests to change the resolver used for tokens
* @param flag boolean for whether token services use ips or hosts
*/
public static void setTokenServiceUseIp(boolean flag) {
SecurityUtil.setTokenServiceUseIp(flag);
}
/**
* Return true if externalKdc=true and the location of the krb5.conf
* file has been specified, and false otherwise.
*/
public static boolean isExternalKdcRunning() {
String externalKdc = System.getProperty("externalKdc");
String krb5Conf = System.getProperty("java.security.krb5.conf");
    return "true".equals(externalKdc) && krb5Conf != null;
}
}
| 1,603 | 33.869565 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestLdapGroupsMappingWithPosixGroup.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.contains;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import javax.naming.NamingException;
import javax.naming.directory.Attribute;
import javax.naming.directory.Attributes;
import javax.naming.directory.SearchControls;
import javax.naming.directory.SearchResult;
import org.apache.hadoop.conf.Configuration;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@SuppressWarnings("unchecked")
public class TestLdapGroupsMappingWithPosixGroup
extends TestLdapGroupsMappingBase {
@Before
public void setupMocks() throws NamingException {
SearchResult mockUserResult = mock(SearchResult.class);
when(mockUserNamingEnum.nextElement()).thenReturn(mockUserResult);
Attribute mockUidNumberAttr = mock(Attribute.class);
Attribute mockGidNumberAttr = mock(Attribute.class);
Attribute mockUidAttr = mock(Attribute.class);
Attributes mockAttrs = mock(Attributes.class);
when(mockUidAttr.get()).thenReturn("some_user");
when(mockUidNumberAttr.get()).thenReturn("700");
when(mockGidNumberAttr.get()).thenReturn("600");
when(mockAttrs.get(eq("uid"))).thenReturn(mockUidAttr);
when(mockAttrs.get(eq("uidNumber"))).thenReturn(mockUidNumberAttr);
when(mockAttrs.get(eq("gidNumber"))).thenReturn(mockGidNumberAttr);
when(mockUserResult.getAttributes()).thenReturn(mockAttrs);
}
@Test
public void testGetGroups() throws IOException, NamingException {
// The search functionality of the mock context is reused, so we will
// return the user NamingEnumeration first, and then the group
when(mockContext.search(anyString(), contains("posix"),
any(Object[].class), any(SearchControls.class)))
.thenReturn(mockUserNamingEnum, mockGroupNamingEnum);
doTestGetGroups(Arrays.asList(testGroups), 2);
}
private void doTestGetGroups(List<String> expectedGroups, int searchTimes)
throws IOException, NamingException {
Configuration conf = new Configuration();
// Set this, so we don't throw an exception
conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://test");
conf.set(LdapGroupsMapping.GROUP_SEARCH_FILTER_KEY,
"(objectClass=posixGroup)(cn={0})");
conf.set(LdapGroupsMapping.USER_SEARCH_FILTER_KEY,
"(objectClass=posixAccount)");
conf.set(LdapGroupsMapping.GROUP_MEMBERSHIP_ATTR_KEY, "memberUid");
conf.set(LdapGroupsMapping.POSIX_UID_ATTR_KEY, "uidNumber");
conf.set(LdapGroupsMapping.POSIX_GID_ATTR_KEY, "gidNumber");
conf.set(LdapGroupsMapping.GROUP_NAME_ATTR_KEY, "cn");
mappingSpy.setConf(conf);
// Username is arbitrary, since the spy is mocked to respond the same,
// regardless of input
List<String> groups = mappingSpy.getGroups("some_user");
Assert.assertEquals(expectedGroups, groups);
mappingSpy.getConf().set(LdapGroupsMapping.POSIX_UID_ATTR_KEY, "uid");
Assert.assertEquals(expectedGroups, groups);
// We should have searched for a user, and then two groups
verify(mockContext, times(searchTimes)).search(anyString(),
anyString(),
any(Object[].class),
any(SearchControls.class));
}
}
| 4,322 | 37.256637 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestJNIGroupsMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import static org.junit.Assume.assumeTrue;
import static org.junit.Assert.*;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.security.GroupMappingServiceProvider;
import org.apache.hadoop.security.JniBasedUnixGroupsMapping;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.NativeCodeLoader;
import org.junit.Before;
import org.junit.Test;
public class TestJNIGroupsMapping {
@Before
public void isNativeCodeLoaded() {
assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
}
@Test
public void testJNIGroupsMapping() throws Exception {
//for the user running the test, check whether the
//ShellBasedUnixGroupsMapping and the JniBasedUnixGroupsMapping
//return the same groups
String user = UserGroupInformation.getCurrentUser().getShortUserName();
testForUser(user);
//check for a dummy non-existent user (both the implementations should
    //return an empty list)
testForUser("fooBarBaz1234DoesNotExist");
}
private void testForUser(String user) throws Exception {
GroupMappingServiceProvider g = new ShellBasedUnixGroupsMapping();
List<String> shellBasedGroups = g.getGroups(user);
g = new JniBasedUnixGroupsMapping();
List<String> jniBasedGroups = g.getGroups(user);
String[] shellBasedGroupsArray = shellBasedGroups.toArray(new String[0]);
Arrays.sort(shellBasedGroupsArray);
String[] jniBasedGroupsArray = jniBasedGroups.toArray(new String[0]);
Arrays.sort(jniBasedGroupsArray);
if (!Arrays.equals(shellBasedGroupsArray, jniBasedGroupsArray)) {
fail("Groups returned by " +
ShellBasedUnixGroupsMapping.class.getCanonicalName() +
" and " +
JniBasedUnixGroupsMapping.class.getCanonicalName() +
" didn't match for " + user);
}
}
}
| 2,754 | 37.263889 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.security;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.junit.*;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.LoginContext;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.ConcurrentModificationException;
import java.util.LinkedHashSet;
import java.util.Set;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
import static org.apache.hadoop.ipc.TestSaslRPC.*;
import static org.apache.hadoop.test.MetricsAsserts.*;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestUserGroupInformation {
  final private static String USER_NAME = "user1@HADOOP.APACHE.ORG";
final private static String GROUP1_NAME = "group1";
final private static String GROUP2_NAME = "group2";
final private static String GROUP3_NAME = "group3";
final private static String[] GROUP_NAMES =
new String[]{GROUP1_NAME, GROUP2_NAME, GROUP3_NAME};
// Rollover interval of percentile metrics (in seconds)
private static final int PERCENTILES_INTERVAL = 1;
private static Configuration conf;
/**
* UGI should not use the default security conf, else it will collide
* with other classes that may change the default conf. Using this dummy
* class that simply throws an exception will ensure that the tests fail
* if UGI uses the static default config instead of its own config.
*/
private static class DummyLoginConfiguration extends
javax.security.auth.login.Configuration
{
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
throw new RuntimeException("UGI is not using its own security conf!");
}
}
/** configure ugi */
@BeforeClass
public static void setup() {
javax.security.auth.login.Configuration.setConfiguration(
new DummyLoginConfiguration());
// doesn't matter what it is, but getGroups needs it set...
// use HADOOP_HOME environment variable to prevent interfering with logic
// that finds winutils.exe
String home = System.getenv("HADOOP_HOME");
System.setProperty("hadoop.home.dir", (home != null ? home : "."));
    // fake the realm in case kerberos is enabled
System.setProperty("java.security.krb5.kdc", "");
System.setProperty("java.security.krb5.realm", "DEFAULT.REALM");
}
@Before
public void setupUgi() {
conf = new Configuration();
UserGroupInformation.reset();
UserGroupInformation.setConfiguration(conf);
}
@After
public void resetUgi() {
UserGroupInformation.setLoginUser(null);
}
@Test (timeout = 30000)
public void testSimpleLogin() throws IOException {
tryLoginAuthenticationMethod(AuthenticationMethod.SIMPLE, true);
}
@Test (timeout = 30000)
public void testTokenLogin() throws IOException {
tryLoginAuthenticationMethod(AuthenticationMethod.TOKEN, false);
}
@Test (timeout = 30000)
public void testProxyLogin() throws IOException {
tryLoginAuthenticationMethod(AuthenticationMethod.PROXY, false);
}
private void tryLoginAuthenticationMethod(AuthenticationMethod method,
boolean expectSuccess)
throws IOException {
SecurityUtil.setAuthenticationMethod(method, conf);
UserGroupInformation.setConfiguration(conf); // pick up changed auth
UserGroupInformation ugi = null;
Exception ex = null;
try {
ugi = UserGroupInformation.getLoginUser();
} catch (Exception e) {
ex = e;
}
if (expectSuccess) {
assertNotNull(ugi);
assertEquals(method, ugi.getAuthenticationMethod());
} else {
assertNotNull(ex);
assertEquals(UnsupportedOperationException.class, ex.getClass());
assertEquals(method + " login authentication is not supported",
ex.getMessage());
}
}
@Test (timeout = 30000)
public void testGetRealAuthenticationMethod() {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("user1");
ugi.setAuthenticationMethod(AuthenticationMethod.SIMPLE);
assertEquals(AuthenticationMethod.SIMPLE, ugi.getAuthenticationMethod());
assertEquals(AuthenticationMethod.SIMPLE, ugi.getRealAuthenticationMethod());
ugi = UserGroupInformation.createProxyUser("user2", ugi);
assertEquals(AuthenticationMethod.PROXY, ugi.getAuthenticationMethod());
assertEquals(AuthenticationMethod.SIMPLE, ugi.getRealAuthenticationMethod());
}
@Test (timeout = 30000)
public void testCreateRemoteUser() {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("user1");
assertEquals(AuthenticationMethod.SIMPLE, ugi.getAuthenticationMethod());
assertTrue (ugi.toString().contains("(auth:SIMPLE)"));
ugi = UserGroupInformation.createRemoteUser("user1",
AuthMethod.KERBEROS);
assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod());
assertTrue (ugi.toString().contains("(auth:KERBEROS)"));
}
/** Test login method */
@Test (timeout = 30000)
public void testLogin() throws Exception {
conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
String.valueOf(PERCENTILES_INTERVAL));
UserGroupInformation.setConfiguration(conf);
// login from unix
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
assertEquals(UserGroupInformation.getCurrentUser(),
UserGroupInformation.getLoginUser());
assertTrue(ugi.getGroupNames().length >= 1);
verifyGroupMetrics(1);
// ensure that doAs works correctly
UserGroupInformation userGroupInfo =
UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
UserGroupInformation curUGI =
userGroupInfo.doAs(new PrivilegedExceptionAction<UserGroupInformation>(){
@Override
public UserGroupInformation run() throws IOException {
return UserGroupInformation.getCurrentUser();
}});
// make sure in the scope of the doAs, the right user is current
assertEquals(curUGI, userGroupInfo);
// make sure it is not the same as the login user
assertFalse(curUGI.equals(UserGroupInformation.getLoginUser()));
}
/**
* Given the user name, get all the groups.
* Needs to happen before creating the test users.
*/
@Test (timeout = 30000)
public void testGetServerSideGroups() throws IOException,
InterruptedException {
// get the user name
Process pp = Runtime.getRuntime().exec("whoami");
BufferedReader br = new BufferedReader
(new InputStreamReader(pp.getInputStream()));
String userName = br.readLine().trim();
    // If on a Windows domain, the token format is DOMAIN\user and we want
    // to extract only the user name
if(Shell.WINDOWS) {
int sp = userName.lastIndexOf('\\');
if (sp != -1) {
userName = userName.substring(sp + 1);
}
// user names are case insensitive on Windows. Make consistent
userName = StringUtils.toLowerCase(userName);
}
// get the groups
pp = Runtime.getRuntime().exec(Shell.WINDOWS ?
Shell.WINUTILS + " groups -F" : "id -Gn");
br = new BufferedReader(new InputStreamReader(pp.getInputStream()));
String line = br.readLine();
System.out.println(userName + ":" + line);
Set<String> groups = new LinkedHashSet<String> ();
String[] tokens = line.split(Shell.TOKEN_SEPARATOR_REGEX);
for(String s: tokens) {
groups.add(s);
}
final UserGroupInformation login = UserGroupInformation.getCurrentUser();
String loginUserName = login.getShortUserName();
if(Shell.WINDOWS) {
// user names are case insensitive on Windows. Make consistent
loginUserName = StringUtils.toLowerCase(loginUserName);
}
assertEquals(userName, loginUserName);
String[] gi = login.getGroupNames();
assertEquals(groups.size(), gi.length);
for(int i=0; i < gi.length; i++) {
assertTrue(groups.contains(gi[i]));
}
final UserGroupInformation fakeUser =
UserGroupInformation.createRemoteUser("foo.bar");
fakeUser.doAs(new PrivilegedExceptionAction<Object>(){
@Override
public Object run() throws IOException {
UserGroupInformation current = UserGroupInformation.getCurrentUser();
assertFalse(current.equals(login));
assertEquals(current, fakeUser);
assertEquals(0, current.getGroupNames().length);
return null;
}});
}
/** test constructor */
@Test (timeout = 30000)
public void testConstructor() throws Exception {
// security off, so default should just return simple name
testConstructorSuccess("user1", "user1");
testConstructorSuccess("[email protected]", "user2");
testConstructorSuccess("user3/[email protected]", "user3");
testConstructorSuccess("[email protected]", "user4");
testConstructorSuccess("user5/[email protected]", "user5");
// failure test
testConstructorFailures(null);
testConstructorFailures("");
}
/** test constructor */
@Test (timeout = 30000)
public void testConstructorWithRules() throws Exception {
// security off, but use rules if explicitly set
conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL,
"RULE:[1:$1@$0](.*@OTHER.REALM)s/(.*)@.*/other-$1/");
UserGroupInformation.setConfiguration(conf);
testConstructorSuccess("user1", "user1");
testConstructorSuccess("[email protected]", "other-user4");
// failure test
testConstructorFailures("[email protected]");
testConstructorFailures("user3/[email protected]");
testConstructorFailures("user5/[email protected]");
testConstructorFailures(null);
testConstructorFailures("");
}
/** test constructor */
@Test (timeout = 30000)
public void testConstructorWithKerberos() throws Exception {
// security on, default is remove default realm
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
UserGroupInformation.setConfiguration(conf);
testConstructorSuccess("user1", "user1");
testConstructorSuccess("[email protected]", "user2");
testConstructorSuccess("user3/[email protected]", "user3");
// failure test
testConstructorFailures("[email protected]");
testConstructorFailures("user5/[email protected]");
testConstructorFailures(null);
testConstructorFailures("");
}
/** test constructor */
@Test (timeout = 30000)
public void testConstructorWithKerberosRules() throws Exception {
// security on, explicit rules
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL,
"RULE:[2:$1@$0](.*@OTHER.REALM)s/(.*)@.*/other-$1/" +
"RULE:[1:$1@$0](.*@OTHER.REALM)s/(.*)@.*/other-$1/" +
"DEFAULT");
UserGroupInformation.setConfiguration(conf);
testConstructorSuccess("user1", "user1");
testConstructorSuccess("[email protected]", "user2");
testConstructorSuccess("user3/[email protected]", "user3");
testConstructorSuccess("[email protected]", "other-user4");
testConstructorSuccess("user5/[email protected]", "other-user5");
// failure test
testConstructorFailures(null);
testConstructorFailures("");
}
private void testConstructorSuccess(String principal, String shortName) {
UserGroupInformation ugi =
UserGroupInformation.createUserForTesting(principal, GROUP_NAMES);
// make sure the short and full user names are correct
assertEquals(principal, ugi.getUserName());
assertEquals(shortName, ugi.getShortUserName());
}
private void testConstructorFailures(String userName) {
try {
UserGroupInformation.createRemoteUser(userName);
fail("user:"+userName+" wasn't invalid");
} catch (IllegalArgumentException e) {
String expect = (userName == null || userName.isEmpty())
? "Null user" : "Illegal principal name "+userName;
assertTrue("Did not find "+ expect + " in " + e,
e.toString().contains(expect));
}
}
@Test (timeout = 30000)
public void testSetConfigWithRules() {
String[] rules = { "RULE:[1:TEST1]", "RULE:[1:TEST2]", "RULE:[1:TEST3]" };
// explicitly set a rule
UserGroupInformation.reset();
assertFalse(KerberosName.hasRulesBeenSet());
KerberosName.setRules(rules[0]);
assertTrue(KerberosName.hasRulesBeenSet());
assertEquals(rules[0], KerberosName.getRules());
// implicit init should honor rules already being set
UserGroupInformation.createUserForTesting("someone", new String[0]);
assertEquals(rules[0], KerberosName.getRules());
// set conf, should override
conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL, rules[1]);
UserGroupInformation.setConfiguration(conf);
assertEquals(rules[1], KerberosName.getRules());
// set conf, should again override
conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL, rules[2]);
UserGroupInformation.setConfiguration(conf);
assertEquals(rules[2], KerberosName.getRules());
// implicit init should honor rules already being set
UserGroupInformation.createUserForTesting("someone", new String[0]);
assertEquals(rules[2], KerberosName.getRules());
}
@Test (timeout = 30000)
public void testEnsureInitWithRules() throws IOException {
String rules = "RULE:[1:RULE1]";
// trigger implicit init, rules should init
UserGroupInformation.reset();
assertFalse(KerberosName.hasRulesBeenSet());
UserGroupInformation.createUserForTesting("someone", new String[0]);
assertTrue(KerberosName.hasRulesBeenSet());
// set a rule, trigger implicit init, rule should not change
UserGroupInformation.reset();
KerberosName.setRules(rules);
assertTrue(KerberosName.hasRulesBeenSet());
assertEquals(rules, KerberosName.getRules());
UserGroupInformation.createUserForTesting("someone", new String[0]);
assertEquals(rules, KerberosName.getRules());
}
@Test (timeout = 30000)
public void testEquals() throws Exception {
UserGroupInformation uugi =
UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
assertEquals(uugi, uugi);
// The subjects should be different, so this should fail
UserGroupInformation ugi2 =
UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
assertFalse(uugi.equals(ugi2));
assertFalse(uugi.hashCode() == ugi2.hashCode());
// two ugi that have the same subject need to be equal
UserGroupInformation ugi3 = new UserGroupInformation(uugi.getSubject());
assertEquals(uugi, ugi3);
assertEquals(uugi.hashCode(), ugi3.hashCode());
}
@Test (timeout = 30000)
public void testEqualsWithRealUser() throws Exception {
UserGroupInformation realUgi1 = UserGroupInformation.createUserForTesting(
"RealUser", GROUP_NAMES);
UserGroupInformation proxyUgi1 = UserGroupInformation.createProxyUser(
USER_NAME, realUgi1);
UserGroupInformation proxyUgi2 =
new UserGroupInformation( proxyUgi1.getSubject());
UserGroupInformation remoteUgi = UserGroupInformation.createRemoteUser(USER_NAME);
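    // equality follows the wrapped Subject: proxyUgi2 shares proxyUgi1's
    // Subject, so they are equal; remoteUgi has its own Subject, so it is not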
assertEquals(proxyUgi1, proxyUgi2);
assertFalse(remoteUgi.equals(proxyUgi1));
}
@Test (timeout = 30000)
public void testGettingGroups() throws Exception {
UserGroupInformation uugi =
UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
assertEquals(USER_NAME, uugi.getUserName());
assertArrayEquals(new String[]{GROUP1_NAME, GROUP2_NAME, GROUP3_NAME},
uugi.getGroupNames());
}
@SuppressWarnings("unchecked") // from Mockito mocks
@Test (timeout = 30000)
public <T extends TokenIdentifier> void testAddToken() throws Exception {
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("someone");
Token<T> t1 = mock(Token.class);
Token<T> t2 = mock(Token.class);
Token<T> t3 = mock(Token.class);
// add token to ugi
ugi.addToken(t1);
checkTokens(ugi, t1);
// replace token t1 with t2 - with same key (null)
ugi.addToken(t2);
checkTokens(ugi, t2);
// change t1 service and add token
when(t1.getService()).thenReturn(new Text("t1"));
ugi.addToken(t1);
checkTokens(ugi, t1, t2);
// overwrite t1 token with t3 - same key (!null)
when(t3.getService()).thenReturn(new Text("t1"));
ugi.addToken(t3);
checkTokens(ugi, t2, t3);
// just try to re-add with new name
when(t1.getService()).thenReturn(new Text("t1.1"));
ugi.addToken(t1);
checkTokens(ugi, t1, t2, t3);
// just try to re-add with new name again
ugi.addToken(t1);
checkTokens(ugi, t1, t2, t3);
}
@SuppressWarnings("unchecked") // from Mockito mocks
@Test (timeout = 30000)
public <T extends TokenIdentifier> void testGetCreds() throws Exception {
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("someone");
Text service = new Text("service");
Token<T> t1 = mock(Token.class);
when(t1.getService()).thenReturn(service);
Token<T> t2 = mock(Token.class);
when(t2.getService()).thenReturn(new Text("service2"));
Token<T> t3 = mock(Token.class);
when(t3.getService()).thenReturn(service);
// add token to ugi
ugi.addToken(t1);
ugi.addToken(t2);
checkTokens(ugi, t1, t2);
Credentials creds = ugi.getCredentials();
creds.addToken(t3.getService(), t3);
assertSame(t3, creds.getToken(service));
// check that ugi wasn't modified
checkTokens(ugi, t1, t2);
}
@SuppressWarnings("unchecked") // from Mockito mocks
@Test (timeout = 30000)
public <T extends TokenIdentifier> void testAddCreds() throws Exception {
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("someone");
Text service = new Text("service");
Token<T> t1 = mock(Token.class);
when(t1.getService()).thenReturn(service);
Token<T> t2 = mock(Token.class);
when(t2.getService()).thenReturn(new Text("service2"));
byte[] secret = new byte[]{};
Text secretKey = new Text("sshhh");
// fill credentials
Credentials creds = new Credentials();
creds.addToken(t1.getService(), t1);
creds.addToken(t2.getService(), t2);
creds.addSecretKey(secretKey, secret);
// add creds to ugi, and check ugi
ugi.addCredentials(creds);
checkTokens(ugi, t1, t2);
assertSame(secret, ugi.getCredentials().getSecretKey(secretKey));
}
@Test (timeout = 30000)
public <T extends TokenIdentifier> void testGetCredsNotSame()
throws Exception {
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("someone");
Credentials creds = ugi.getCredentials();
// should always get a new copy
assertNotSame(creds, ugi.getCredentials());
}
private void checkTokens(UserGroupInformation ugi, Token<?> ... tokens) {
// check the ugi's token collection
Collection<Token<?>> ugiTokens = ugi.getTokens();
for (Token<?> t : tokens) {
assertTrue(ugiTokens.contains(t));
}
assertEquals(tokens.length, ugiTokens.size());
// check the ugi's credentials
Credentials ugiCreds = ugi.getCredentials();
for (Token<?> t : tokens) {
assertSame(t, ugiCreds.getToken(t.getService()));
}
assertEquals(tokens.length, ugiCreds.numberOfTokens());
}
@SuppressWarnings("unchecked") // from Mockito mocks
@Test (timeout = 30000)
public <T extends TokenIdentifier> void testAddNamedToken() throws Exception {
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("someone");
Token<T> t1 = mock(Token.class);
Text service1 = new Text("t1");
Text service2 = new Text("t2");
when(t1.getService()).thenReturn(service1);
// add token
ugi.addToken(service1, t1);
assertSame(t1, ugi.getCredentials().getToken(service1));
// add token with another name
ugi.addToken(service2, t1);
assertSame(t1, ugi.getCredentials().getToken(service1));
assertSame(t1, ugi.getCredentials().getToken(service2));
}
@SuppressWarnings("unchecked") // from Mockito mocks
@Test (timeout = 30000)
public <T extends TokenIdentifier> void testUGITokens() throws Exception {
UserGroupInformation ugi =
UserGroupInformation.createUserForTesting("TheDoctor",
new String [] { "TheTARDIS"});
Token<T> t1 = mock(Token.class);
when(t1.getService()).thenReturn(new Text("t1"));
Token<T> t2 = mock(Token.class);
when(t2.getService()).thenReturn(new Text("t2"));
Credentials creds = new Credentials();
byte[] secretKey = new byte[]{};
Text secretName = new Text("shhh");
creds.addSecretKey(secretName, secretKey);
ugi.addToken(t1);
ugi.addToken(t2);
ugi.addCredentials(creds);
Collection<Token<? extends TokenIdentifier>> z = ugi.getTokens();
assertTrue(z.contains(t1));
assertTrue(z.contains(t2));
assertEquals(2, z.size());
Credentials ugiCreds = ugi.getCredentials();
assertSame(secretKey, ugiCreds.getSecretKey(secretName));
assertEquals(1, ugiCreds.numberOfSecretKeys());
try {
z.remove(t1);
fail("Shouldn't be able to modify token collection from UGI");
} catch(UnsupportedOperationException uoe) {
// Can't modify tokens
}
// ensure that the tokens are passed through doAs
Collection<Token<? extends TokenIdentifier>> otherSet =
ugi.doAs(new PrivilegedExceptionAction<Collection<Token<?>>>(){
@Override
public Collection<Token<?>> run() throws IOException {
return UserGroupInformation.getCurrentUser().getTokens();
}
});
assertTrue(otherSet.contains(t1));
assertTrue(otherSet.contains(t2));
}
@Test (timeout = 30000)
public void testTokenIdentifiers() throws Exception {
UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
"TheDoctor", new String[] { "TheTARDIS" });
TokenIdentifier t1 = mock(TokenIdentifier.class);
TokenIdentifier t2 = mock(TokenIdentifier.class);
ugi.addTokenIdentifier(t1);
ugi.addTokenIdentifier(t2);
Collection<TokenIdentifier> z = ugi.getTokenIdentifiers();
assertTrue(z.contains(t1));
assertTrue(z.contains(t2));
assertEquals(2, z.size());
// ensure that the token identifiers are passed through doAs
Collection<TokenIdentifier> otherSet = ugi
.doAs(new PrivilegedExceptionAction<Collection<TokenIdentifier>>() {
@Override
public Collection<TokenIdentifier> run() throws IOException {
return UserGroupInformation.getCurrentUser().getTokenIdentifiers();
}
});
assertTrue(otherSet.contains(t1));
assertTrue(otherSet.contains(t2));
assertEquals(2, otherSet.size());
}
@Test (timeout = 30000)
public void testTestAuthMethod() throws Exception {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // verify the reverse mappings work
for (AuthenticationMethod am : AuthenticationMethod.values()) {
if (am.getAuthMethod() != null) {
ugi.setAuthenticationMethod(am.getAuthMethod());
assertEquals(am, ugi.getAuthenticationMethod());
}
}
}
@Test (timeout = 30000)
public void testUGIAuthMethod() throws Exception {
final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
final AuthenticationMethod am = AuthenticationMethod.KERBEROS;
ugi.setAuthenticationMethod(am);
Assert.assertEquals(am, ugi.getAuthenticationMethod());
ugi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
Assert.assertEquals(am, UserGroupInformation.getCurrentUser()
.getAuthenticationMethod());
return null;
}
});
}
@Test (timeout = 30000)
public void testUGIAuthMethodInRealUser() throws Exception {
final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser(
"proxy", ugi);
final AuthenticationMethod am = AuthenticationMethod.KERBEROS;
ugi.setAuthenticationMethod(am);
Assert.assertEquals(am, ugi.getAuthenticationMethod());
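    // the proxy reports PROXY as its auth method, while the real user's
    // method remains visible through getRealAuthenticationMethod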
Assert.assertEquals(AuthenticationMethod.PROXY,
proxyUgi.getAuthenticationMethod());
Assert.assertEquals(am, UserGroupInformation
.getRealAuthenticationMethod(proxyUgi));
proxyUgi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
Assert.assertEquals(AuthenticationMethod.PROXY, UserGroupInformation
.getCurrentUser().getAuthenticationMethod());
Assert.assertEquals(am, UserGroupInformation.getCurrentUser()
.getRealUser().getAuthenticationMethod());
return null;
}
});
UserGroupInformation proxyUgi2 =
new UserGroupInformation(proxyUgi.getSubject());
proxyUgi2.setAuthenticationMethod(AuthenticationMethod.PROXY);
Assert.assertEquals(proxyUgi, proxyUgi2);
// Equality should work if authMethod is null
UserGroupInformation realugi = UserGroupInformation.getCurrentUser();
UserGroupInformation proxyUgi3 = UserGroupInformation.createProxyUser(
"proxyAnother", realugi);
UserGroupInformation proxyUgi4 =
new UserGroupInformation(proxyUgi3.getSubject());
Assert.assertEquals(proxyUgi3, proxyUgi4);
}
@Test (timeout = 30000)
public void testLoginObjectInSubject() throws Exception {
UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
UserGroupInformation anotherUgi = new UserGroupInformation(loginUgi
.getSubject());
LoginContext login1 = loginUgi.getSubject().getPrincipals(User.class)
.iterator().next().getLogin();
LoginContext login2 = anotherUgi.getSubject().getPrincipals(User.class)
.iterator().next().getLogin();
    // login1 and login2 must be the same instance
Assert.assertTrue(login1 == login2);
}
@Test (timeout = 30000)
public void testLoginModuleCommit() throws Exception {
UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
User user1 = loginUgi.getSubject().getPrincipals(User.class).iterator()
.next();
LoginContext login = user1.getLogin();
login.logout();
login.login();
User user2 = loginUgi.getSubject().getPrincipals(User.class).iterator()
.next();
    // user1 and user2 must be the same instance.
Assert.assertTrue(user1 == user2);
}
public static void verifyLoginMetrics(long success, int failure)
throws IOException {
    // Ensure metrics related to kerberos login are updated.
MetricsRecordBuilder rb = getMetrics("UgiMetrics");
if (success > 0) {
assertCounter("LoginSuccessNumOps", success, rb);
assertGaugeGt("LoginSuccessAvgTime", 0, rb);
}
if (failure > 0) {
      assertCounter("LoginFailureNumOps", failure, rb);
assertGaugeGt("LoginFailureAvgTime", 0, rb);
}
}
private static void verifyGroupMetrics(
long groups) throws InterruptedException {
MetricsRecordBuilder rb = getMetrics("UgiMetrics");
if (groups > 0) {
assertCounterGt("GetGroupsNumOps", groups-1, rb);
double avg = getDoubleGauge("GetGroupsAvgTime", rb);
assertTrue(avg >= 0.0);
// Sleep for an interval+slop to let the percentiles rollover
Thread.sleep((PERCENTILES_INTERVAL+1)*1000);
// Check that the percentiles were updated
assertQuantileGauges("GetGroups1s", rb);
}
}
/**
* Test for the case that UserGroupInformation.getCurrentUser()
* is called when the AccessControlContext has a Subject associated
* with it, but that Subject was not created by Hadoop (ie it has no
* associated User principal)
*/
@Test (timeout = 30000)
public void testUGIUnderNonHadoopContext() throws Exception {
Subject nonHadoopSubject = new Subject();
Subject.doAs(nonHadoopSubject, new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
assertNotNull(ugi);
return null;
}
});
}
@Test (timeout = 30000)
public void testGetUGIFromSubject() throws Exception {
KerberosPrincipal p = new KerberosPrincipal("guest");
Subject subject = new Subject();
subject.getPrincipals().add(p);
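    // "guest" picks up the default realm (DEFAULT.REALM, set in setup())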
UserGroupInformation ugi = UserGroupInformation.getUGIFromSubject(subject);
assertNotNull(ugi);
assertEquals("[email protected]", ugi.getUserName());
}
@Test(timeout=1000)
public void testSetLoginUser() throws IOException {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test-user");
UserGroupInformation.setLoginUser(ugi);
assertEquals(ugi, UserGroupInformation.getLoginUser());
}
/**
* In some scenario, such as HA, delegation tokens are associated with a
* logical name. The tokens are cloned and are associated with the
* physical address of the server where the service is provided.
* This test ensures cloned delegated tokens are locally used
* and are not returned in {@link UserGroupInformation#getCredentials()}
*/
@Test
public void testPrivateTokenExclusion() throws Exception {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
TestTokenIdentifier tokenId = new TestTokenIdentifier();
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(
tokenId.getBytes(), "password".getBytes(),
tokenId.getKind(), null);
ugi.addToken(new Text("regular-token"), token);
// Now add cloned private token
ugi.addToken(new Text("private-token"), new Token.PrivateToken<TestTokenIdentifier>(token));
ugi.addToken(new Text("private-token1"), new Token.PrivateToken<TestTokenIdentifier>(token));
// Ensure only non-private tokens are returned
Collection<Token<? extends TokenIdentifier>> tokens = ugi.getCredentials().getAllTokens();
assertEquals(1, tokens.size());
}
/**
* This test checks a race condition between getting and adding tokens for
* the current user. Calling UserGroupInformation.getCurrentUser() returns
* a new object each time, so simply making these methods synchronized was not
* enough to prevent race conditions and causing a
* ConcurrentModificationException. These methods are synchronized on the
* Subject, which is the same object between UserGroupInformation instances.
* This test tries to cause a CME, by exposing the race condition. Previously
* this test would fail every time; now it does not.
*/
@Test
public void testTokenRaceCondition() throws Exception {
UserGroupInformation userGroupInfo =
UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
userGroupInfo.doAs(new PrivilegedExceptionAction<Void>(){
@Override
public Void run() throws Exception {
// make sure it is not the same as the login user because we use the
// same UGI object for every instantiation of the login user and you
// won't run into the race condition otherwise
assertNotEquals(UserGroupInformation.getLoginUser(),
UserGroupInformation.getCurrentUser());
GetTokenThread thread = new GetTokenThread();
try {
thread.start();
for (int i = 0; i < 100; i++) {
@SuppressWarnings("unchecked")
Token<? extends TokenIdentifier> t = mock(Token.class);
when(t.getService()).thenReturn(new Text("t" + i));
UserGroupInformation.getCurrentUser().addToken(t);
assertNull("ConcurrentModificationException encountered",
thread.cme);
}
} catch (ConcurrentModificationException cme) {
cme.printStackTrace();
fail("ConcurrentModificationException encountered");
} finally {
thread.runThread = false;
thread.join(5 * 1000);
}
return null;
}});
}
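  // worker that hammers getCredentials() so the main thread's addToken()
  // calls can race against iteration over the Subject's credentials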
static class GetTokenThread extends Thread {
boolean runThread = true;
volatile ConcurrentModificationException cme = null;
@Override
public void run() {
while(runThread) {
try {
UserGroupInformation.getCurrentUser().getCredentials();
} catch (ConcurrentModificationException cme) {
this.cme = cme;
cme.printStackTrace();
runThread = false;
} catch (IOException ex) {
ex.printStackTrace();
}
}
}
}
}
| 34,560 | 37.615642 | 107 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestProxyUserFromEnv.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.security;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import org.junit.Test;
public class TestProxyUserFromEnv {
/** Test HADOOP_PROXY_USER for impersonation */
@Test
public void testProxyUserFromEnvironment() throws IOException {
String proxyUser = "foo.bar";
System.setProperty(UserGroupInformation.HADOOP_PROXY_USER, proxyUser);
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
assertEquals(proxyUser, ugi.getUserName());
UserGroupInformation realUgi = ugi.getRealUser();
assertNotNull(realUgi);
// get the expected real user name
Process pp = Runtime.getRuntime().exec("whoami");
BufferedReader br = new BufferedReader
(new InputStreamReader(pp.getInputStream()));
String realUser = br.readLine().trim();
    // On a Windows domain-joined machine, whoami returns the username in
    // the DOMAIN\user format, so we trim the domain part before the
    // comparison. No Windows-specific guard is needed, since Unix
    // usernames cannot contain backslashes.
int backslashIndex = realUser.indexOf('\\');
if (backslashIndex != -1) {
realUser = realUser.substring(backslashIndex + 1);
}
assertEquals(realUser, realUgi.getUserName());
}
}
| 2,240 | 38.315789 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.security;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.*;
import static org.junit.Assert.*;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import javax.security.auth.kerberos.KerberosPrincipal;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.StringUtils;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
public class TestSecurityUtil {
@BeforeClass
public static void unsetKerberosRealm() {
    // prevent failures if kinit-ed or on OS X with no realm
System.setProperty("java.security.krb5.kdc", "");
System.setProperty("java.security.krb5.realm", "NONE");
}
@Test
public void isOriginalTGTReturnsCorrectValues() {
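    // a TGS principal has the form krbtgt/REALM@REALM, where the instance
    // must exactly match the principal's realm (case-sensitive)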
assertTrue(SecurityUtil.isTGSPrincipal
(new KerberosPrincipal("krbtgt/foo@foo")));
assertTrue(SecurityUtil.isTGSPrincipal
(new KerberosPrincipal("krbtgt/[email protected]")));
assertFalse(SecurityUtil.isTGSPrincipal
(null));
assertFalse(SecurityUtil.isTGSPrincipal
(new KerberosPrincipal("blah")));
assertFalse(SecurityUtil.isTGSPrincipal
(new KerberosPrincipal("krbtgt/hello")));
assertFalse(SecurityUtil.isTGSPrincipal
(new KerberosPrincipal("krbtgt/foo@FOO")));
}
private void verify(String original, String hostname, String expected)
throws IOException {
assertEquals(expected,
SecurityUtil.getServerPrincipal(original, hostname));
InetAddress addr = mockAddr(hostname);
assertEquals(expected,
SecurityUtil.getServerPrincipal(original, addr));
}
private InetAddress mockAddr(String reverseTo) {
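    // fabricate an address whose canonical (reverse-resolved) name is the
    // given string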
InetAddress mock = Mockito.mock(InetAddress.class);
Mockito.doReturn(reverseTo).when(mock).getCanonicalHostName();
return mock;
}
@Test
public void testGetServerPrincipal() throws IOException {
String service = "hdfs/";
String realm = "@REALM";
String hostname = "foohost";
String userPrincipal = "foo@FOOREALM";
String shouldReplace = service + SecurityUtil.HOSTNAME_PATTERN + realm;
String replaced = service + hostname + realm;
verify(shouldReplace, hostname, replaced);
String shouldNotReplace = service + SecurityUtil.HOSTNAME_PATTERN + "NAME"
+ realm;
verify(shouldNotReplace, hostname, shouldNotReplace);
verify(userPrincipal, hostname, userPrincipal);
    // verify that reverse DNS lookup does not happen
InetAddress notUsed = Mockito.mock(InetAddress.class);
assertEquals(shouldNotReplace,
SecurityUtil.getServerPrincipal(shouldNotReplace, notUsed));
Mockito.verify(notUsed, Mockito.never()).getCanonicalHostName();
}
@Test
public void testPrincipalsWithLowerCaseHosts() throws IOException {
String service = "xyz/";
String realm = "@REALM";
String principalInConf = service + SecurityUtil.HOSTNAME_PATTERN + realm;
String hostname = "FooHost";
String principal =
service + StringUtils.toLowerCase(hostname) + realm;
verify(principalInConf, hostname, principal);
}
@Test
public void testLocalHostNameForNullOrWild() throws Exception {
String local = StringUtils.toLowerCase(SecurityUtil.getLocalHostName());
assertEquals("hdfs/" + local + "@REALM",
SecurityUtil.getServerPrincipal("hdfs/_HOST@REALM", (String)null));
assertEquals("hdfs/" + local + "@REALM",
SecurityUtil.getServerPrincipal("hdfs/_HOST@REALM", "0.0.0.0"));
}
@Test
public void testStartsWithIncorrectSettings() throws IOException {
Configuration conf = new Configuration();
SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
String keyTabKey="key";
conf.set(keyTabKey, "");
UserGroupInformation.setConfiguration(conf);
boolean gotException = false;
try {
SecurityUtil.login(conf, keyTabKey, "", "");
} catch (IOException e) {
// expected
gotException=true;
}
assertTrue("Exception for empty keytabfile name was expected", gotException);
}
@Test
public void testGetHostFromPrincipal() {
assertEquals("host",
SecurityUtil.getHostFromPrincipal("service/host@realm"));
assertEquals(null,
SecurityUtil.getHostFromPrincipal("service@realm"));
}
@Test
public void testBuildDTServiceName() {
SecurityUtil.setTokenServiceUseIp(true);
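    // with use_ip on, the service is always ip:port; a port in the URI
    // authority takes precedence over the default port argument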
assertEquals("127.0.0.1:123",
SecurityUtil.buildDTServiceName(URI.create("test://LocalHost"), 123)
);
assertEquals("127.0.0.1:123",
SecurityUtil.buildDTServiceName(URI.create("test://LocalHost:123"), 456)
);
assertEquals("127.0.0.1:123",
SecurityUtil.buildDTServiceName(URI.create("test://127.0.0.1"), 123)
);
assertEquals("127.0.0.1:123",
SecurityUtil.buildDTServiceName(URI.create("test://127.0.0.1:123"), 456)
);
}
@Test
public void testBuildTokenServiceSockAddr() {
SecurityUtil.setTokenServiceUseIp(true);
assertEquals("127.0.0.1:123",
SecurityUtil.buildTokenService(new InetSocketAddress("LocalHost", 123)).toString()
);
assertEquals("127.0.0.1:123",
SecurityUtil.buildTokenService(new InetSocketAddress("127.0.0.1", 123)).toString()
);
// what goes in, comes out
assertEquals("127.0.0.1:123",
SecurityUtil.buildTokenService(NetUtils.createSocketAddr("127.0.0.1", 123)).toString()
);
}
@Test
public void testGoodHostsAndPorts() {
InetSocketAddress compare = NetUtils.createSocketAddrForHost("localhost", 123);
runGoodCases(compare, "localhost", 123);
runGoodCases(compare, "localhost:", 123);
runGoodCases(compare, "localhost:123", 456);
}
void runGoodCases(InetSocketAddress addr, String host, int port) {
assertEquals(addr, NetUtils.createSocketAddr(host, port));
assertEquals(addr, NetUtils.createSocketAddr("hdfs://"+host, port));
assertEquals(addr, NetUtils.createSocketAddr("hdfs://"+host+"/path", port));
}
@Test
public void testBadHostsAndPorts() {
runBadCases("", true);
runBadCases(":", false);
runBadCases("hdfs/", false);
runBadCases("hdfs:/", false);
runBadCases("hdfs://", true);
}
void runBadCases(String prefix, boolean validIfPosPort) {
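    // permute the prefix into authorities that should be rejected;
    // validIfPosPort marks those that become legal once a positive
    // default port is supplied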
runBadPortPermutes(prefix, false);
runBadPortPermutes(prefix+"*", false);
runBadPortPermutes(prefix+"localhost", validIfPosPort);
runBadPortPermutes(prefix+"localhost:-1", false);
runBadPortPermutes(prefix+"localhost:-123", false);
runBadPortPermutes(prefix+"localhost:xyz", false);
runBadPortPermutes(prefix+"localhost/xyz", validIfPosPort);
runBadPortPermutes(prefix+"localhost/:123", validIfPosPort);
runBadPortPermutes(prefix+":123", false);
runBadPortPermutes(prefix+":xyz", false);
}
void runBadPortPermutes(String arg, boolean validIfPosPort) {
    int[] ports = { -123, -1, 123 };
boolean bad = false;
try {
NetUtils.createSocketAddr(arg);
} catch (IllegalArgumentException e) {
bad = true;
} finally {
assertTrue("should be bad: '"+arg+"'", bad);
}
for (int port : ports) {
if (validIfPosPort && port > 0) continue;
bad = false;
try {
NetUtils.createSocketAddr(arg, port);
} catch (IllegalArgumentException e) {
bad = true;
} finally {
assertTrue("should be bad: '"+arg+"' (default port:"+port+")", bad);
}
}
}
// check that the socket addr has:
// 1) the InetSocketAddress has the correct hostname, ie. exact host/ip given
// 2) the address is resolved, ie. has an ip
// 3,4) the socket's InetAddress has the same hostname, and the correct ip
// 5) the port is correct
private void
verifyValues(InetSocketAddress addr, String host, String ip, int port) {
assertTrue(!addr.isUnresolved());
// don't know what the standard resolver will return for hostname.
// should be host for host; host or ip for ip is ambiguous
if (!SecurityUtil.useIpForTokenService) {
assertEquals(host, addr.getHostName());
assertEquals(host, addr.getAddress().getHostName());
}
assertEquals(ip, addr.getAddress().getHostAddress());
assertEquals(port, addr.getPort());
}
// check:
// 1) buildTokenService honors use_ip setting
// 2) setTokenService & getService works
// 3) getTokenServiceAddr decodes to the identical socket addr
private void
verifyTokenService(InetSocketAddress addr, String host, String ip, int port, boolean useIp) {
//LOG.info("address:"+addr+" host:"+host+" ip:"+ip+" port:"+port);
SecurityUtil.setTokenServiceUseIp(useIp);
String serviceHost = useIp ? ip : StringUtils.toLowerCase(host);
Token<?> token = new Token<TokenIdentifier>();
Text service = new Text(serviceHost+":"+port);
assertEquals(service, SecurityUtil.buildTokenService(addr));
SecurityUtil.setTokenService(token, addr);
assertEquals(service, token.getService());
InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
assertNotNull(serviceAddr);
verifyValues(serviceAddr, serviceHost, ip, port);
}
// check:
// 1) socket addr is created with fields set as expected
// 2) token service with ips
// 3) token service with the given host or ip
private void
verifyAddress(InetSocketAddress addr, String host, String ip, int port) {
verifyValues(addr, host, ip, port);
//LOG.info("test that token service uses ip");
verifyTokenService(addr, host, ip, port, true);
//LOG.info("test that token service uses host");
verifyTokenService(addr, host, ip, port, false);
}
// check:
// 1-4) combinations of host and port
// this will construct a socket addr, verify all the fields, build the
// service to verify the use_ip setting is honored, set the token service
// based on addr and verify the token service is set correctly, decode
// the token service and ensure all the fields of the decoded addr match
private void verifyServiceAddr(String host, String ip) {
InetSocketAddress addr;
int port = 123;
// test host, port tuple
//LOG.info("test tuple ("+host+","+port+")");
addr = NetUtils.createSocketAddrForHost(host, port);
verifyAddress(addr, host, ip, port);
// test authority with no default port
//LOG.info("test authority '"+host+":"+port+"'");
addr = NetUtils.createSocketAddr(host+":"+port);
verifyAddress(addr, host, ip, port);
// test authority with a default port, make sure default isn't used
//LOG.info("test authority '"+host+":"+port+"' with ignored default port");
addr = NetUtils.createSocketAddr(host+":"+port, port+1);
verifyAddress(addr, host, ip, port);
// test host-only authority, using port as default port
//LOG.info("test host:"+host+" port:"+port);
addr = NetUtils.createSocketAddr(host, port);
verifyAddress(addr, host, ip, port);
}
@Test
public void testSocketAddrWithName() {
String staticHost = "my";
NetUtils.addStaticResolution(staticHost, "localhost");
verifyServiceAddr("LocalHost", "127.0.0.1");
}
@Test
public void testSocketAddrWithIP() {
String staticHost = "127.0.0.1";
NetUtils.addStaticResolution(staticHost, "localhost");
verifyServiceAddr(staticHost, "127.0.0.1");
}
@Test
public void testSocketAddrWithNameToStaticName() {
String staticHost = "host1";
NetUtils.addStaticResolution(staticHost, "localhost");
verifyServiceAddr(staticHost, "127.0.0.1");
}
@Test
public void testSocketAddrWithNameToStaticIP() {
String staticHost = "host3";
NetUtils.addStaticResolution(staticHost, "255.255.255.255");
verifyServiceAddr(staticHost, "255.255.255.255");
}
  // a bizarre case, but it covers a test remapping an IP address to another IP
@Test
public void testSocketAddrWithIPToStaticIP() {
String staticHost = "1.2.3.4";
NetUtils.addStaticResolution(staticHost, "255.255.255.255");
verifyServiceAddr(staticHost, "255.255.255.255");
}
@Test
public void testGetAuthenticationMethod() {
Configuration conf = new Configuration();
// default is simple
conf.unset(HADOOP_SECURITY_AUTHENTICATION);
assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
// simple
conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
// kerberos
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
assertEquals(KERBEROS, SecurityUtil.getAuthenticationMethod(conf));
// bad value
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kaboom");
String error = null;
try {
SecurityUtil.getAuthenticationMethod(conf);
} catch (Exception e) {
error = e.toString();
}
assertEquals("java.lang.IllegalArgumentException: " +
"Invalid attribute value for " +
HADOOP_SECURITY_AUTHENTICATION + " of kaboom", error);
}
@Test
public void testSetAuthenticationMethod() {
Configuration conf = new Configuration();
// default
SecurityUtil.setAuthenticationMethod(null, conf);
assertEquals("simple", conf.get(HADOOP_SECURITY_AUTHENTICATION));
// simple
SecurityUtil.setAuthenticationMethod(SIMPLE, conf);
assertEquals("simple", conf.get(HADOOP_SECURITY_AUTHENTICATION));
// kerberos
SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
assertEquals("kerberos", conf.get(HADOOP_SECURITY_AUTHENTICATION));
}
}
| 14,660 | 35.929471 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestCompositeGroupMapping.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.junit.Test;
public class TestCompositeGroupMapping {
public static final Log LOG = LogFactory.getLog(TestCompositeGroupMapping.class);
private static Configuration conf = new Configuration();
private static class TestUser {
String name;
String group;
String group2;
public TestUser(String name, String group) {
this.name = name;
this.group = group;
}
public TestUser(String name, String group, String group2) {
this(name, group);
this.group2 = group2;
}
};
private static TestUser john = new TestUser("John", "user-group");
private static TestUser hdfs = new TestUser("hdfs", "supergroup");
private static TestUser jack = new TestUser("Jack", "user-group", "dev-group-1");
private static final String PROVIDER_SPECIFIC_CONF = ".test.prop";
private static final String PROVIDER_SPECIFIC_CONF_KEY =
GroupMappingServiceProvider.GROUP_MAPPING_CONFIG_PREFIX + PROVIDER_SPECIFIC_CONF;
private static final String PROVIDER_SPECIFIC_CONF_VALUE_FOR_USER = "value-for-user";
private static final String PROVIDER_SPECIFIC_CONF_VALUE_FOR_CLUSTER = "value-for-cluster";
private static abstract class GroupMappingProviderBase
implements GroupMappingServiceProvider, Configurable {
private Configuration conf;
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return this.conf;
}
@Override
public void cacheGroupsRefresh() throws IOException {
}
@Override
public void cacheGroupsAdd(List<String> groups) throws IOException {
}
protected List<String> toList(String group) {
if (group != null) {
return Arrays.asList(new String[] {group});
}
return new ArrayList<String>();
}
protected void checkTestConf(String expectedValue) {
String configValue = getConf().get(PROVIDER_SPECIFIC_CONF_KEY);
if (configValue == null || !configValue.equals(expectedValue)) {
throw new RuntimeException("Failed to find mandatory configuration of " + PROVIDER_SPECIFIC_CONF_KEY);
}
}
};
private static class UserProvider extends GroupMappingProviderBase {
@Override
public List<String> getGroups(String user) throws IOException {
checkTestConf(PROVIDER_SPECIFIC_CONF_VALUE_FOR_USER);
String group = null;
if (user.equals(john.name)) {
group = john.group;
} else if (user.equals(jack.name)) {
group = jack.group;
}
return toList(group);
}
}
private static class ClusterProvider extends GroupMappingProviderBase {
@Override
public List<String> getGroups(String user) throws IOException {
checkTestConf(PROVIDER_SPECIFIC_CONF_VALUE_FOR_CLUSTER);
String group = null;
if (user.equals(hdfs.name)) {
group = hdfs.group;
} else if (user.equals(jack.name)) { // jack has another group from clusterProvider
group = jack.group2;
}
return toList(group);
}
}
static {
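    // compose two providers: userProvider knows John and Jack,
    // clusterProvider knows hdfs and Jack; each provider also gets its
    // own provider-specific configuration value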
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
CompositeGroupsMapping.class, GroupMappingServiceProvider.class);
conf.set(CompositeGroupsMapping.MAPPING_PROVIDERS_CONFIG_KEY, "userProvider,clusterProvider");
conf.setClass(CompositeGroupsMapping.MAPPING_PROVIDER_CONFIG_PREFIX + ".userProvider",
UserProvider.class, GroupMappingServiceProvider.class);
conf.setClass(CompositeGroupsMapping.MAPPING_PROVIDER_CONFIG_PREFIX + ".clusterProvider",
ClusterProvider.class, GroupMappingServiceProvider.class);
conf.set(CompositeGroupsMapping.MAPPING_PROVIDER_CONFIG_PREFIX +
".clusterProvider" + PROVIDER_SPECIFIC_CONF, PROVIDER_SPECIFIC_CONF_VALUE_FOR_CLUSTER);
conf.set(CompositeGroupsMapping.MAPPING_PROVIDER_CONFIG_PREFIX +
".userProvider" + PROVIDER_SPECIFIC_CONF, PROVIDER_SPECIFIC_CONF_VALUE_FOR_USER);
}
@Test
public void TestMultipleGroupsMapping() throws Exception {
Groups groups = new Groups(conf);
assertTrue(groups.getGroups(john.name).get(0).equals(john.group));
assertTrue(groups.getGroups(hdfs.name).get(0).equals(hdfs.group));
}
@Test
public void TestMultipleGroupsMappingWithCombined() throws Exception {
conf.set(CompositeGroupsMapping.MAPPING_PROVIDERS_COMBINED_CONFIG_KEY, "true");
Groups groups = new Groups(conf);
assertTrue(groups.getGroups(jack.name).size() == 2);
// the configured providers list in order is "userProvider,clusterProvider"
// group -> userProvider, group2 -> clusterProvider
assertTrue(groups.getGroups(jack.name).contains(jack.group));
assertTrue(groups.getGroups(jack.name).contains(jack.group2));
}
@Test
public void TestMultipleGroupsMappingWithoutCombined() throws Exception {
conf.set(CompositeGroupsMapping.MAPPING_PROVIDERS_COMBINED_CONFIG_KEY, "false");
Groups groups = new Groups(conf);
// the configured providers list in order is "userProvider,clusterProvider"
// group -> userProvider, group2 -> clusterProvider
assertTrue(groups.getGroups(jack.name).size() == 1);
assertTrue(groups.getGroups(jack.name).get(0).equals(jack.group));
}
}
| 6,539 | 34.16129 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.NativeCodeLoader;
import org.junit.Test;
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public class TestAccessControlList {
private static final Log LOG =
LogFactory.getLog(TestAccessControlList.class);
/**
* Test the netgroups (groups in ACL rules that start with @)
*
* This is a manual test because it requires:
* - host setup
* - native code compiled
* - specify the group mapping class
*
* Host setup:
*
* /etc/nsswitch.conf should have a line like this:
* netgroup: files
*
* /etc/netgroup should be (the whole file):
* lasVegas (,elvis,)
* memphis (,elvis,) (,jerryLeeLewis,)
*
* To run this test:
*
* export JAVA_HOME='path/to/java'
* ant \
* -Dtestcase=TestAccessControlList \
* -Dtest.output=yes \
* -DTestAccessControlListGroupMapping=$className \
* compile-native test
*
* where $className is one of the classes that provide group
* mapping services, i.e. classes that implement
* GroupMappingServiceProvider interface, at this time:
* - org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMapping
* - org.apache.hadoop.security.ShellBasedUnixGroupsNetgroupMapping
*
*/
@Test
public void testNetgroups() throws Exception {
if(!NativeCodeLoader.isNativeCodeLoaded()) {
LOG.info("Not testing netgroups, " +
"this test only runs when native code is compiled");
return;
}
String groupMappingClassName =
System.getProperty("TestAccessControlListGroupMapping");
if(groupMappingClassName == null) {
LOG.info("Not testing netgroups, no group mapping class specified, " +
"use -DTestAccessControlListGroupMapping=$className to specify " +
"group mapping class (must implement GroupMappingServiceProvider " +
"interface and support netgroups)");
return;
}
LOG.info("Testing netgroups using: " + groupMappingClassName);
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_GROUP_MAPPING,
groupMappingClassName);
Groups groups = Groups.getUserToGroupsMappingService(conf);
AccessControlList acl;
// create these ACLs to populate groups cache
acl = new AccessControlList("ja my"); // plain
acl = new AccessControlList("sinatra ratpack,@lasVegas"); // netgroup
acl = new AccessControlList(" somegroup,@someNetgroup"); // no user
// this ACL will be used for testing ACLs
acl = new AccessControlList("carlPerkins ratpack,@lasVegas");
acl.addGroup("@memphis");
    // validate the netgroups before and after refresh to make
// sure refresh works correctly
validateNetgroups(groups, acl);
groups.refresh();
validateNetgroups(groups, acl);
}
/**
* Validate the netgroups, both group membership and ACL
* functionality
*
* Note: assumes a specific acl setup done by testNetgroups
*
* @param groups group to user mapping service
* @param acl ACL set up in a specific way, see testNetgroups
*/
private void validateNetgroups(Groups groups,
AccessControlList acl) throws Exception {
// check that the netgroups are working
List<String> elvisGroups = groups.getGroups("elvis");
assertTrue(elvisGroups.contains("@lasVegas"));
assertTrue(elvisGroups.contains("@memphis"));
List<String> jerryLeeLewisGroups = groups.getGroups("jerryLeeLewis");
assertTrue(jerryLeeLewisGroups.contains("@memphis"));
// allowed because his netgroup is in ACL
UserGroupInformation elvis =
UserGroupInformation.createRemoteUser("elvis");
assertUserAllowed(elvis, acl);
// allowed because he's in ACL
UserGroupInformation carlPerkins =
UserGroupInformation.createRemoteUser("carlPerkins");
assertUserAllowed(carlPerkins, acl);
// not allowed because he's not in ACL and has no netgroups
UserGroupInformation littleRichard =
UserGroupInformation.createRemoteUser("littleRichard");
assertUserNotAllowed(littleRichard, acl);
}
@Test
public void testWildCardAccessControlList() throws Exception {
AccessControlList acl;
acl = new AccessControlList("*");
assertTrue(acl.isAllAllowed());
acl = new AccessControlList(" * ");
assertTrue(acl.isAllAllowed());
acl = new AccessControlList(" *");
assertTrue(acl.isAllAllowed());
acl = new AccessControlList("* ");
assertTrue(acl.isAllAllowed());
}
// Check if AccessControlList.toString() works as expected.
  // Also validate getAclString() for various cases.
@Test
public void testAclString() {
AccessControlList acl;
acl = new AccessControlList("*");
assertTrue(acl.toString().equals("All users are allowed"));
validateGetAclString(acl);
acl = new AccessControlList(" ");
assertTrue(acl.toString().equals("No users are allowed"));
acl = new AccessControlList("user1,user2");
assertTrue(acl.toString().equals("Users [user1, user2] are allowed"));
validateGetAclString(acl);
acl = new AccessControlList("user1,user2 ");// with space
assertTrue(acl.toString().equals("Users [user1, user2] are allowed"));
validateGetAclString(acl);
acl = new AccessControlList(" group1,group2");
assertTrue(acl.toString().equals(
"Members of the groups [group1, group2] are allowed"));
validateGetAclString(acl);
acl = new AccessControlList("user1,user2 group1,group2");
assertTrue(acl.toString().equals(
"Users [user1, user2] and " +
"members of the groups [group1, group2] are allowed"));
validateGetAclString(acl);
}
// Validates if getAclString() is working as expected. i.e. if we can build
// a new ACL instance from the value returned by getAclString().
private void validateGetAclString(AccessControlList acl) {
assertTrue(acl.toString().equals(
new AccessControlList(acl.getAclString()).toString()));
}
@Test
public void testAccessControlList() throws Exception {
AccessControlList acl;
Collection<String> users;
Collection<String> groups;
acl = new AccessControlList("drwho tardis");
users = acl.getUsers();
assertEquals(users.size(), 1);
assertEquals(users.iterator().next(), "drwho");
groups = acl.getGroups();
assertEquals(groups.size(), 1);
assertEquals(groups.iterator().next(), "tardis");
acl = new AccessControlList("drwho");
users = acl.getUsers();
assertEquals(users.size(), 1);
assertEquals(users.iterator().next(), "drwho");
groups = acl.getGroups();
assertEquals(groups.size(), 0);
acl = new AccessControlList("drwho ");
users = acl.getUsers();
assertEquals(users.size(), 1);
assertEquals(users.iterator().next(), "drwho");
groups = acl.getGroups();
assertEquals(groups.size(), 0);
acl = new AccessControlList(" tardis");
users = acl.getUsers();
assertEquals(users.size(), 0);
groups = acl.getGroups();
assertEquals(groups.size(), 1);
assertEquals(groups.iterator().next(), "tardis");
Iterator<String> iter;
acl = new AccessControlList("drwho,joe tardis, users");
users = acl.getUsers();
assertEquals(users.size(), 2);
iter = users.iterator();
assertEquals(iter.next(), "drwho");
assertEquals(iter.next(), "joe");
groups = acl.getGroups();
assertEquals(groups.size(), 2);
iter = groups.iterator();
assertEquals(iter.next(), "tardis");
assertEquals(iter.next(), "users");
}
/**
* Test addUser/Group and removeUser/Group api.
*/
@Test
public void testAddRemoveAPI() {
AccessControlList acl;
Collection<String> users;
Collection<String> groups;
acl = new AccessControlList(" ");
assertEquals(0, acl.getUsers().size());
assertEquals(0, acl.getGroups().size());
assertEquals(" ", acl.getAclString());
acl.addUser("drwho");
users = acl.getUsers();
assertEquals(users.size(), 1);
assertEquals(users.iterator().next(), "drwho");
assertEquals("drwho ", acl.getAclString());
acl.addGroup("tardis");
groups = acl.getGroups();
assertEquals(groups.size(), 1);
assertEquals(groups.iterator().next(), "tardis");
assertEquals("drwho tardis", acl.getAclString());
acl.addUser("joe");
acl.addGroup("users");
users = acl.getUsers();
assertEquals(users.size(), 2);
Iterator<String> iter = users.iterator();
assertEquals(iter.next(), "drwho");
assertEquals(iter.next(), "joe");
groups = acl.getGroups();
assertEquals(groups.size(), 2);
iter = groups.iterator();
assertEquals(iter.next(), "tardis");
assertEquals(iter.next(), "users");
assertEquals("drwho,joe tardis,users", acl.getAclString());
acl.removeUser("joe");
acl.removeGroup("users");
users = acl.getUsers();
assertEquals(users.size(), 1);
assertFalse(users.contains("joe"));
groups = acl.getGroups();
assertEquals(groups.size(), 1);
assertFalse(groups.contains("users"));
assertEquals("drwho tardis", acl.getAclString());
acl.removeGroup("tardis");
groups = acl.getGroups();
assertEquals(0, groups.size());
assertFalse(groups.contains("tardis"));
assertEquals("drwho ", acl.getAclString());
acl.removeUser("drwho");
assertEquals(0, users.size());
assertFalse(users.contains("drwho"));
assertEquals(0, acl.getGroups().size());
assertEquals(0, acl.getUsers().size());
assertEquals(" ", acl.getAclString());
}
/**
   * Tests adding/removing the wildcard as a user/group.
*/
@Test
public void testAddRemoveWildCard() {
AccessControlList acl = new AccessControlList("drwho tardis");
Throwable th = null;
try {
acl.addUser(" * ");
} catch (Throwable t) {
th = t;
}
assertNotNull(th);
assertTrue(th instanceof IllegalArgumentException);
th = null;
try {
acl.addGroup(" * ");
} catch (Throwable t) {
th = t;
}
assertNotNull(th);
assertTrue(th instanceof IllegalArgumentException);
th = null;
try {
acl.removeUser(" * ");
} catch (Throwable t) {
th = t;
}
assertNotNull(th);
assertTrue(th instanceof IllegalArgumentException);
th = null;
try {
acl.removeGroup(" * ");
} catch (Throwable t) {
th = t;
}
assertNotNull(th);
assertTrue(th instanceof IllegalArgumentException);
}
/**
   * Tests adding a user/group to a wildcard ACL.
*/
@Test
public void testAddRemoveToWildCardACL() {
AccessControlList acl = new AccessControlList(" * ");
assertTrue(acl.isAllAllowed());
UserGroupInformation drwho =
UserGroupInformation.createUserForTesting("[email protected]",
new String[] { "aliens" });
UserGroupInformation drwho2 =
UserGroupInformation.createUserForTesting("[email protected]",
new String[] { "tardis" });
acl.addUser("drwho");
assertTrue(acl.isAllAllowed());
assertFalse(acl.getAclString().contains("drwho"));
acl.addGroup("tardis");
assertTrue(acl.isAllAllowed());
assertFalse(acl.getAclString().contains("tardis"));
acl.removeUser("drwho");
assertTrue(acl.isAllAllowed());
assertUserAllowed(drwho, acl);
acl.removeGroup("tardis");
assertTrue(acl.isAllAllowed());
assertUserAllowed(drwho2, acl);
}
/**
* Verify the method isUserAllowed()
*/
@Test
public void testIsUserAllowed() {
AccessControlList acl;
UserGroupInformation drwho =
UserGroupInformation.createUserForTesting("[email protected]",
new String[] { "aliens", "humanoids", "timelord" });
UserGroupInformation susan =
UserGroupInformation.createUserForTesting("[email protected]",
new String[] { "aliens", "humanoids", "timelord" });
UserGroupInformation barbara =
UserGroupInformation.createUserForTesting("[email protected]",
new String[] { "humans", "teachers" });
UserGroupInformation ian =
UserGroupInformation.createUserForTesting("[email protected]",
new String[] { "humans", "teachers" });
acl = new AccessControlList("drwho humanoids");
assertUserAllowed(drwho, acl);
assertUserAllowed(susan, acl);
assertUserNotAllowed(barbara, acl);
assertUserNotAllowed(ian, acl);
acl = new AccessControlList("drwho");
assertUserAllowed(drwho, acl);
assertUserNotAllowed(susan, acl);
assertUserNotAllowed(barbara, acl);
assertUserNotAllowed(ian, acl);
acl = new AccessControlList("drwho ");
assertUserAllowed(drwho, acl);
assertUserNotAllowed(susan, acl);
assertUserNotAllowed(barbara, acl);
assertUserNotAllowed(ian, acl);
acl = new AccessControlList(" humanoids");
assertUserAllowed(drwho, acl);
assertUserAllowed(susan, acl);
assertUserNotAllowed(barbara, acl);
assertUserNotAllowed(ian, acl);
acl = new AccessControlList("drwho,ian aliens,teachers");
assertUserAllowed(drwho, acl);
assertUserAllowed(susan, acl);
assertUserAllowed(barbara, acl);
assertUserAllowed(ian, acl);
}
private void assertUserAllowed(UserGroupInformation ugi,
AccessControlList acl) {
assertTrue("User " + ugi + " is not granted the access-control!!",
acl.isUserAllowed(ugi));
}
private void assertUserNotAllowed(UserGroupInformation ugi,
AccessControlList acl) {
assertFalse("User " + ugi
+ " is incorrectly granted the access-control!!",
acl.isUserAllowed(ugi));
}
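  // A minimal usage sketch of the API exercised by the helpers above, shown
  // only as a comment because the config key "my.service.acl" is hypothetical:
  //
  //   AccessControlList acl =
  //       new AccessControlList(conf.get("my.service.acl", "*"));
  //   if (!acl.isUserAllowed(UserGroupInformation.getCurrentUser())) {
  //     throw new AccessControlException("Current user is not in the ACL");
  //   }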
}
| 15,178 | 31.503212 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyServers.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
public class TestProxyServers {
@Test
public void testProxyServer() {
Configuration conf = new Configuration();
assertFalse(ProxyServers.isProxyServer("1.1.1.1"));
conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS, "2.2.2.2, 3.3.3.3");
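    // note the space after the comma above: entries are trimmed when the
    // proxy-server list is parsed, as the assertions below rely on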
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
assertFalse(ProxyServers.isProxyServer("1.1.1.1"));
assertTrue(ProxyServers.isProxyServer("2.2.2.2"));
assertTrue(ProxyServers.isProxyServer("3.3.3.3"));
}
}
| 1,491 | 37.25641 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestServiceAuthorization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.net.InetAddress;
import java.net.UnknownHostException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.TestRPC.TestProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Test;
public class TestServiceAuthorization {
private static final String ACL_CONFIG = "test.protocol.acl";
private static final String ACL_CONFIG1 = "test.protocol1.acl";
private static final String ADDRESS = "0.0.0.0";
private static final String HOST_CONFIG = "test.protocol.hosts";
private static final String BLOCKED_HOST_CONFIG = "test.protocol.hosts.blocked";
private static final String AUTHORIZED_IP = "1.2.3.4";
private static final String UNAUTHORIZED_IP = "1.2.3.5";
private static final String IP_RANGE = "10.222.0.0/16,10.113.221.221";
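  // Key layout exercised below: the per-protocol ACL key plus derived
  // suffixes (".blocked" for deny lists, ".hosts"/".hosts.blocked" for
  // host-based rules):
  //   test.protocol.acl            allowed users/groups
  //   test.protocol.acl.blocked    blocked users/groups
  //   test.protocol.hosts          allowed hosts (IP or CIDR range)
  //   test.protocol.hosts.blocked  blocked hosts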
  public interface TestProtocol1 extends TestProtocol {}
private static class TestPolicyProvider extends PolicyProvider {
@Override
public Service[] getServices() {
return new Service[] { new Service(ACL_CONFIG, TestProtocol.class),
new Service(ACL_CONFIG1, TestProtocol1.class),
};
}
}
@Test
public void testDefaultAcl() {
ServiceAuthorizationManager serviceAuthorizationManager =
new ServiceAuthorizationManager();
    Configuration conf = new Configuration();
// test without setting a default acl
conf.set(ACL_CONFIG, "user1 group1");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
AccessControlList acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class);
assertEquals("user1 group1", acl.getAclString());
acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class);
assertEquals(AccessControlList.WILDCARD_ACL_VALUE, acl.getAclString());
// test with a default acl
conf.set(
CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL,
"user2 group2");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class);
assertEquals("user1 group1", acl.getAclString());
acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class);
assertEquals("user2 group2", acl.getAclString());
}
@Test
public void testBlockedAcl() throws UnknownHostException {
UserGroupInformation drwho =
UserGroupInformation.createUserForTesting("[email protected]",
new String[] { "group1", "group2" });
ServiceAuthorizationManager serviceAuthorizationManager =
new ServiceAuthorizationManager();
    Configuration conf = new Configuration();
// test without setting a blocked acl
conf.set(ACL_CONFIG, "user1 group1");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
} catch (AuthorizationException e) {
fail();
}
// now set a blocked acl with another user and another group
conf.set(ACL_CONFIG + ServiceAuthorizationManager.BLOCKED, "drwho2 group3");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
} catch (AuthorizationException e) {
fail();
}
// now set a blocked acl with the user and another group
conf.set(ACL_CONFIG + ServiceAuthorizationManager.BLOCKED, "drwho group3");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
fail();
    } catch (AuthorizationException e) {
      // expects Exception
    }
// now set a blocked acl with another user and another group
conf.set(ACL_CONFIG + ServiceAuthorizationManager.BLOCKED, "drwho2 group3");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
} catch (AuthorizationException e) {
fail();
}
// now set a blocked acl with another user and group that the user belongs to
conf.set(ACL_CONFIG + ServiceAuthorizationManager.BLOCKED, "drwho2 group2");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
fail();
} catch (AuthorizationException e) {
// expects Exception
}
// reset blocked acl so that there is no blocked ACL
conf.set(ACL_CONFIG + ServiceAuthorizationManager.BLOCKED, "");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
} catch (AuthorizationException e) {
fail();
}
}
@Test
public void testDefaultBlockedAcl() throws UnknownHostException {
UserGroupInformation drwho =
UserGroupInformation.createUserForTesting("[email protected]",
new String[] { "group1", "group2" });
ServiceAuthorizationManager serviceAuthorizationManager =
new ServiceAuthorizationManager();
    Configuration conf = new Configuration();
// test without setting a default blocked acl
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol1.class, conf,
InetAddress.getByName(ADDRESS));
} catch (AuthorizationException e) {
fail();
}
    // set a restrictive default blocked acl and a non-restricting blocked acl for TestProtocol
conf.set(
CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_BLOCKED_ACL,
"user2 group2");
conf.set(ACL_CONFIG + ServiceAuthorizationManager.BLOCKED, "user2");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
// drwho is authorized to access TestProtocol
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
} catch (AuthorizationException e) {
fail();
}
// drwho is not authorized to access TestProtocol1 because it uses the default blocked acl.
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol1.class, conf,
InetAddress.getByName(ADDRESS));
fail();
} catch (AuthorizationException e) {
// expects Exception
}
}
@Test
public void testMachineList() throws UnknownHostException {
UserGroupInformation drwho =
UserGroupInformation.createUserForTesting("[email protected]",
new String[] { "group1", "group2" });
ServiceAuthorizationManager serviceAuthorizationManager =
new ServiceAuthorizationManager();
    Configuration conf = new Configuration();
conf.set(HOST_CONFIG, "1.2.3.4");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(AUTHORIZED_IP));
} catch (AuthorizationException e) {
fail();
}
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(UNAUTHORIZED_IP));
fail();
} catch (AuthorizationException e) {
// expects Exception
}
}
@Test
public void testDefaultMachineList() throws UnknownHostException {
UserGroupInformation drwho =
UserGroupInformation.createUserForTesting("[email protected]",
new String[] { "group1", "group2" });
ServiceAuthorizationManager serviceAuthorizationManager =
new ServiceAuthorizationManager();
    Configuration conf = new Configuration();
// test without setting a default MachineList
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(UNAUTHORIZED_IP));
} catch (AuthorizationException e) {
fail();
}
// test with a default MachineList
conf.set(
"security.service.authorization.default.hosts",
IP_RANGE);
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(UNAUTHORIZED_IP));
fail();
} catch (AuthorizationException e) {
// expects Exception
}
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName("10.222.0.0"));
} catch (AuthorizationException e) {
fail();
}
}
@Test
public void testBlockedMachineList() throws UnknownHostException {
UserGroupInformation drwho =
UserGroupInformation.createUserForTesting("[email protected]",
new String[] { "group1", "group2" });
ServiceAuthorizationManager serviceAuthorizationManager =
new ServiceAuthorizationManager();
    Configuration conf = new Configuration();
// test without setting a blocked MachineList
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho,
TestProtocol.class, conf, InetAddress.getByName("10.222.0.0"));
} catch (AuthorizationException e) {
fail();
}
// now set a blocked MachineList
conf.set(BLOCKED_HOST_CONFIG, IP_RANGE);
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho,
TestProtocol.class, conf, InetAddress.getByName("10.222.0.0"));
fail();
} catch (AuthorizationException e) {
// expects Exception
}
// reset blocked MachineList
conf.set(BLOCKED_HOST_CONFIG, "");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho,
TestProtocol.class, conf, InetAddress.getByName("10.222.0.0"));
} catch (AuthorizationException e) {
fail();
}
}
@Test
public void testDefaultBlockedMachineList() throws UnknownHostException {
UserGroupInformation drwho =
UserGroupInformation.createUserForTesting("[email protected]",
new String[] { "group1", "group2" });
ServiceAuthorizationManager serviceAuthorizationManager =
new ServiceAuthorizationManager();
    Configuration conf = new Configuration();
// test without setting a default blocked MachineList
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho,
TestProtocol1.class, conf, InetAddress.getByName("10.222.0.0"));
} catch (AuthorizationException e) {
fail();
}
// set a default blocked MachineList and a blocked MachineList for TestProtocol
conf.set(
"security.service.authorization.default.hosts.blocked",
IP_RANGE);
conf.set(BLOCKED_HOST_CONFIG, "1.2.3.4");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
// TestProtocol can be accessed from "10.222.0.0" because it blocks only "1.2.3.4"
try {
serviceAuthorizationManager.authorize(drwho,
TestProtocol.class, conf, InetAddress.getByName("10.222.0.0"));
} catch (AuthorizationException e) {
fail();
}
// TestProtocol cannot be accessed from "1.2.3.4"
try {
serviceAuthorizationManager.authorize(drwho,
TestProtocol.class, conf, InetAddress.getByName("1.2.3.4"));
fail();
} catch (AuthorizationException e) {
      // expects Exception
}
// TestProtocol1 can be accessed from "1.2.3.4" because it uses default block list
try {
serviceAuthorizationManager.authorize(drwho,
TestProtocol1.class, conf, InetAddress.getByName("1.2.3.4"));
} catch (AuthorizationException e) {
fail();
}
// TestProtocol1 cannot be accessed from "10.222.0.0",
// because "10.222.0.0" is in default block list
try {
serviceAuthorizationManager.authorize(drwho,
TestProtocol1.class, conf, InetAddress.getByName("10.222.0.0"));
fail();
} catch (AuthorizationException e) {
      // expects Exception
}
}
}
| 13,575 | 38.236994 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestProxyUsers.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.authorize;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.security.SecureRandom;
import java.util.Arrays;
import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
public class TestProxyUsers {
private static final Log LOG =
LogFactory.getLog(TestProxyUsers.class);
private static final String REAL_USER_NAME = "proxier";
private static final String PROXY_USER_NAME = "proxied_user";
private static final String AUTHORIZED_PROXY_USER_NAME = "authorized_proxied_user";
private static final String[] GROUP_NAMES =
new String[] { "foo_group" };
private static final String[] NETGROUP_NAMES =
new String[] { "@foo_group" };
private static final String[] OTHER_GROUP_NAMES =
new String[] { "bar_group" };
private static final String[] SUDO_GROUP_NAMES =
new String[] { "sudo_proxied_user" };
private static final String PROXY_IP = "1.2.3.4";
private static final String PROXY_IP_RANGE = "10.222.0.0/16,10.113.221.221";
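  // Under the default prefix, the DefaultImpersonationProvider key helpers
  // used below resolve to configuration keys of the form
  // hadoop.proxyuser.<superuser>.users / .groups / .hosts.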
/**
* Test the netgroups (groups in ACL rules that start with @)
*
* This is a manual test because it requires:
* - host setup
* - native code compiled
   * - a group mapping class specified on the command line
*
* Host setup:
*
* /etc/nsswitch.conf should have a line like this:
* netgroup: files
*
* /etc/netgroup should be (the whole file):
* foo_group (,proxied_user,)
*
* To run this test:
*
* export JAVA_HOME='path/to/java'
* mvn test \
* -Dtest=TestProxyUsers \
* -DTestProxyUsersGroupMapping=$className \
*
* where $className is one of the classes that provide group
* mapping services, i.e. classes that implement
* GroupMappingServiceProvider interface, at this time:
* - org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMapping
* - org.apache.hadoop.security.ShellBasedUnixGroupsNetgroupMapping
*
*/
@Test
  public void testNetgroups() throws IOException {
    if (!NativeCodeLoader.isNativeCodeLoaded()) {
LOG.info("Not testing netgroups, " +
"this test only runs when native code is compiled");
return;
}
String groupMappingClassName =
System.getProperty("TestProxyUsersGroupMapping");
    if (groupMappingClassName == null) {
LOG.info("Not testing netgroups, no group mapping class specified, " +
"use -DTestProxyUsersGroupMapping=$className to specify " +
"group mapping class (must implement GroupMappingServiceProvider " +
"interface and support netgroups)");
return;
}
LOG.info("Testing netgroups using: " + groupMappingClassName);
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_GROUP_MAPPING,
groupMappingClassName);
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(NETGROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
Groups groups = Groups.getUserToGroupsMappingService(conf);
// try proxying a group that's allowed
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, groups.getGroups(PROXY_USER_NAME).toArray(
new String[groups.getGroups(PROXY_USER_NAME).size()]));
assertAuthorized(proxyUserUgi, PROXY_IP);
}
@Test
public void testProxyUsers() throws Exception {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
// First try proxying a group that's allowed
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From good IP
assertAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
// Now try proxying a group that's not allowed
realUserUgi = UserGroupInformation.createRemoteUser(REAL_USER_NAME);
proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, OTHER_GROUP_NAMES);
// From good IP
assertNotAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
}
@Test
public void testProxyUsersWithUserConf() throws Exception {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserUserConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(AUTHORIZED_PROXY_USER_NAME)));
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
// First try proxying a user that's allowed
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
AUTHORIZED_PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From good IP
assertAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
// Now try proxying a user that's not allowed
realUserUgi = UserGroupInformation.createRemoteUser(REAL_USER_NAME);
proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From good IP
assertNotAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
}
@Test
public void testWildcardGroup() {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
"*");
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
// First try proxying a group that's allowed
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From good IP
assertAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
// Now try proxying a different group (just to make sure we aren't getting spill over
// from the other test case!)
realUserUgi = UserGroupInformation.createRemoteUser(REAL_USER_NAME);
proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, OTHER_GROUP_NAMES);
// From good IP
assertAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
}
@Test
public void testWildcardUser() {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserUserConfKey(REAL_USER_NAME),
"*");
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
// First try proxying a user that's allowed
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
AUTHORIZED_PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From good IP
assertAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
// Now try proxying a different user (just to make sure we aren't getting spill over
// from the other test case!)
realUserUgi = UserGroupInformation.createRemoteUser(REAL_USER_NAME);
proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, OTHER_GROUP_NAMES);
// From good IP
assertAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
}
@Test
public void testWildcardIP() {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
"*");
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
// First try proxying a group that's allowed
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From either IP should be fine
assertAuthorized(proxyUserUgi, "1.2.3.4");
assertAuthorized(proxyUserUgi, "1.2.3.5");
// Now set up an unallowed group
realUserUgi = UserGroupInformation.createRemoteUser(REAL_USER_NAME);
proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, OTHER_GROUP_NAMES);
// Neither IP should be OK
assertNotAuthorized(proxyUserUgi, "1.2.3.4");
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
}
@Test
public void testIPRange() {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
"*");
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP_RANGE);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
// First try proxying a group that's allowed
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From good IP
assertAuthorized(proxyUserUgi, "10.222.0.0");
// From bad IP
assertNotAuthorized(proxyUserUgi, "10.221.0.0");
}
@Test
public void testWithDuplicateProxyGroups() throws Exception {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(GROUP_NAMES,GROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
Collection<String> groupsToBeProxied =
ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME));
    assertEquals(1, groupsToBeProxied.size());
}
@Test
public void testWithDuplicateProxyHosts() throws Exception {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getTestProvider()
.getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(PROXY_IP,PROXY_IP)));
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
Collection<String> hosts =
ProxyUsers.getDefaultImpersonationProvider().getProxyHosts().get(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME));
    assertEquals(1, hosts.size());
}
@Test
public void testProxyUsersWithProviderOverride() throws Exception {
Configuration conf = new Configuration();
conf.set(
CommonConfigurationKeysPublic.HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS,
"org.apache.hadoop.security.authorize.TestProxyUsers$TestDummyImpersonationProvider");
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
// First try proxying a group that's allowed
UserGroupInformation realUserUgi = UserGroupInformation
.createUserForTesting(REAL_USER_NAME, SUDO_GROUP_NAMES);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From good IP
assertAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertAuthorized(proxyUserUgi, "1.2.3.5");
// Now try proxying a group that's not allowed
realUserUgi = UserGroupInformation
.createUserForTesting(REAL_USER_NAME, GROUP_NAMES);
proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From good IP
assertNotAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
}
@Test
public void testWithProxyGroupsAndUsersWithSpaces() throws Exception {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserUserConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(PROXY_USER_NAME + " ",AUTHORIZED_PROXY_USER_NAME, "ONEMORE")));
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
Collection<String> groupsToBeProxied =
ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME));
    assertEquals(GROUP_NAMES.length, groupsToBeProxied.size());
}
@Test(expected = IllegalArgumentException.class)
public void testProxyUsersWithNullPrefix() throws Exception {
ProxyUsers.refreshSuperUserGroupsConfiguration(new Configuration(false),
null);
}
@Test(expected = IllegalArgumentException.class)
public void testProxyUsersWithEmptyPrefix() throws Exception {
ProxyUsers.refreshSuperUserGroupsConfiguration(new Configuration(false),
"");
}
@Test
public void testProxyUsersWithCustomPrefix() throws Exception {
Configuration conf = new Configuration(false);
conf.set("x." + REAL_USER_NAME + ".users",
StringUtils.join(",", Arrays.asList(AUTHORIZED_PROXY_USER_NAME)));
conf.set("x." + REAL_USER_NAME+ ".hosts", PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf, "x");
// First try proxying a user that's allowed
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
AUTHORIZED_PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From good IP
assertAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
// Now try proxying a user that's not allowed
realUserUgi = UserGroupInformation.createRemoteUser(REAL_USER_NAME);
proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From good IP
assertNotAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
}
@Test
public void testNoHostsForUsers() throws Exception {
Configuration conf = new Configuration(false);
conf.set("y." + REAL_USER_NAME + ".users",
StringUtils.join(",", Arrays.asList(AUTHORIZED_PROXY_USER_NAME)));
ProxyUsers.refreshSuperUserGroupsConfiguration(conf, "y");
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
AUTHORIZED_PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// IP doesn't matter
assertNotAuthorized(proxyUserUgi, "1.2.3.4");
}
private void assertNotAuthorized(UserGroupInformation proxyUgi, String host) {
try {
ProxyUsers.authorize(proxyUgi, host);
fail("Allowed authorization of " + proxyUgi + " from " + host);
} catch (AuthorizationException e) {
// Expected
}
}
private void assertAuthorized(UserGroupInformation proxyUgi, String host) {
try {
ProxyUsers.authorize(proxyUgi, host);
} catch (AuthorizationException e) {
fail("Did not allow authorization of " + proxyUgi + " from " + host);
}
}
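  // Server-side pattern these helpers mirror (a sketch; "connection" is a
  // hypothetical handle to the incoming RPC):
  //
  //   UserGroupInformation ugi = connection.getUser();
  //   if (ugi.getRealUser() != null) { // only proxy users need this check
  //     ProxyUsers.authorize(ugi, connection.getHostAddress());
  //   }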
static class TestDummyImpersonationProvider implements ImpersonationProvider {
@Override
public void init(String configurationPrefix) {
}
/**
* Authorize a user (superuser) to impersonate another user (user1) if the
* superuser belongs to the group "sudo_user1" .
*/
    @Override
    public void authorize(UserGroupInformation user,
        String remoteAddress) throws AuthorizationException {
UserGroupInformation superUser = user.getRealUser();
String sudoGroupName = "sudo_" + user.getShortUserName();
if (!Arrays.asList(superUser.getGroupNames()).contains(sudoGroupName)){
throw new AuthorizationException("User: " + superUser.getUserName()
+ " is not allowed to impersonate " + user.getUserName());
}
}
@Override
public void setConf(Configuration conf) {
}
@Override
public Configuration getConf() {
return null;
}
}
public static void loadTest(String ipString, int testRange) {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
ipString
);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
// First try proxying a group that's allowed
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
long startTime = System.nanoTime();
SecureRandom sr = new SecureRandom();
    for (int i = 1; i < 1000000; i++) {
try {
ProxyUsers.authorize(proxyUserUgi, "1.2.3."+ sr.nextInt(testRange));
      } catch (AuthorizationException e) {
        // ignored: this loop only measures authorization throughput
      }
}
long stopTime = System.nanoTime();
long elapsedTime = stopTime - startTime;
System.out.println(elapsedTime/1000000 + " ms");
}
/**
* invokes the load Test
* A few sample invocations are as below
* TestProxyUsers ip 128 256
* TestProxyUsers range 1.2.3.0/25 256
* TestProxyUsers ip 4 8
* TestProxyUsers range 1.2.3.0/30 8
* @param args
*/
  public static void main(String[] args) {
    String ipValues = null;
    if (args.length != 3 || (!args[0].equals("ip") && !args[0].equals("range"))) {
      System.out.println("Invalid invocation. The right syntax is: " +
          "TestProxyUsers <ip|range> <numberOfIps|cidr> <testRange>");
    } else {
      if (args[0].equals("ip")) {
        int numberOfIps = Integer.parseInt(args[1]);
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < numberOfIps; i++) {
          sb.append("1.2.3.").append(i).append(',');
        }
        ipValues = sb.toString();
      } else if (args[0].equals("range")) {
        ipValues = args[1];
      }
      int testRange = Integer.parseInt(args[2]);
      loadTest(ipValues, testRange);
    }
  }
}
| 22,257 | 35.133117 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.alias;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.ProviderUtils;
import org.junit.Test;
import java.net.URI;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertArrayEquals;
public class TestCredentialProvider {
@Test
public void testCredentialEntry() throws Exception {
char[] key1 = new char[]{1,2,3,4};
CredentialProvider.CredentialEntry obj =
new CredentialProvider.CredentialEntry("cred1", key1);
assertEquals("cred1", obj.getAlias());
assertArrayEquals(new char[]{1,2,3,4}, obj.getCredential());
}
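  // unnestUri peels off the outer scheme of a nested provider URI of the
  // form outer://inner-scheme@authority/path and returns the inner URI as
  // a Path, as the cases below demonstrate.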
@Test
public void testUnnestUri() throws Exception {
assertEquals(new Path("hdfs://nn.example.com/my/path"),
ProviderUtils.unnestUri(new URI("myscheme://[email protected]/my/path")));
assertEquals(new Path("hdfs://nn/my/path?foo=bar&baz=bat#yyy"),
ProviderUtils.unnestUri(new URI("myscheme://hdfs@nn/my/path?foo=bar&baz=bat#yyy")));
assertEquals(new Path("inner://[email protected]/my/path"),
ProviderUtils.unnestUri(new URI("outer://inner@[email protected]/my/path")));
assertEquals(new Path("user:///"),
ProviderUtils.unnestUri(new URI("outer://user/")));
}
}
| 2,062 | 38.673077 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredentialProviderFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.alias;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestName;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class TestCredentialProviderFactory {
public static final Log LOG = LogFactory.getLog(TestCredentialProviderFactory.class);
@Rule
public final TestName test = new TestName();
@Before
public void announce() {
LOG.info("Running test " + test.getMethodName());
}
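  // Password alphabet: visually ambiguous characters (0, 1, i, l, o and
  // their upper-case forms) are deliberately omitted.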
private static char[] chars = { 'a', 'b', 'c', 'd', 'e', 'f', 'g',
'h', 'j', 'k', 'm', 'n', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'2', '3', '4', '5', '6', '7', '8', '9',};
private static final File tmpDir =
new File(System.getProperty("test.build.data", "/tmp"), "creds");
@Test
public void testFactory() throws Exception {
Configuration conf = new Configuration();
final String userUri = UserProvider.SCHEME_NAME + ":///";
final Path jksPath = new Path(tmpDir.toString(), "test.jks");
final String jksUri = JavaKeyStoreProvider.SCHEME_NAME +
"://file" + jksPath.toUri();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
userUri + "," + jksUri);
List<CredentialProvider> providers =
CredentialProviderFactory.getProviders(conf);
assertEquals(2, providers.size());
assertEquals(UserProvider.class, providers.get(0).getClass());
assertEquals(JavaKeyStoreProvider.class, providers.get(1).getClass());
assertEquals(userUri, providers.get(0).toString());
assertEquals(jksUri, providers.get(1).toString());
}
@Test
public void testFactoryErrors() throws Exception {
Configuration conf = new Configuration();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "unknown:///");
try {
      CredentialProviderFactory.getProviders(conf);
      fail("should throw!");
} catch (IOException e) {
assertEquals("No CredentialProviderFactory for unknown:/// in " +
CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
e.getMessage());
}
}
@Test
public void testUriErrors() throws Exception {
Configuration conf = new Configuration();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "unkn@own:/x/y");
try {
      CredentialProviderFactory.getProviders(conf);
      fail("should throw!");
} catch (IOException e) {
assertEquals("Bad configuration of " +
CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH +
" at unkn@own:/x/y", e.getMessage());
}
}
private static char[] generatePassword(int length) {
    StringBuilder sb = new StringBuilder();
Random r = new Random();
for (int i = 0; i < length; i++) {
sb.append(chars[r.nextInt(chars.length)]);
}
return sb.toString().toCharArray();
}
static void checkSpecificProvider(Configuration conf,
String ourUrl) throws Exception {
CredentialProvider provider =
CredentialProviderFactory.getProviders(conf).get(0);
char[] passwd = generatePassword(16);
// ensure that we get nulls when the key isn't there
assertEquals(null, provider.getCredentialEntry("no-such-key"));
assertEquals(null, provider.getCredentialEntry("key"));
// create a new key
try {
provider.createCredentialEntry("pass", passwd);
} catch (Exception e) {
e.printStackTrace();
throw e;
}
// make sure we get back the right key
assertArrayEquals(passwd, provider.getCredentialEntry("pass").getCredential());
// try recreating pass
try {
provider.createCredentialEntry("pass", passwd);
assertTrue("should throw", false);
} catch (IOException e) {
assertEquals("Credential pass already exists in " + ourUrl, e.getMessage());
}
provider.deleteCredentialEntry("pass");
try {
provider.deleteCredentialEntry("pass");
assertTrue("should throw", false);
} catch (IOException e) {
assertEquals("Credential pass does not exist in " + ourUrl, e.getMessage());
}
char[] passTwo = new char[]{'1', '2', '3'};
provider.createCredentialEntry("pass", passwd);
provider.createCredentialEntry("pass2", passTwo);
assertArrayEquals(passTwo,
provider.getCredentialEntry("pass2").getCredential());
// write them to disk so that configuration.getPassword will find them
provider.flush();
// configuration.getPassword should get this from provider
assertArrayEquals(passTwo, conf.getPassword("pass2"));
// configuration.getPassword should get this from config
conf.set("onetwothree", "123");
assertArrayEquals(passTwo, conf.getPassword("onetwothree"));
// configuration.getPassword should NOT get this from config since
// we are disabling the fallback to clear text config
conf.set(CredentialProvider.CLEAR_TEXT_FALLBACK, "false");
assertArrayEquals(null, conf.getPassword("onetwothree"));
// get a new instance of the provider to ensure it was saved correctly
provider = CredentialProviderFactory.getProviders(conf).get(0);
assertTrue(provider != null);
assertArrayEquals(new char[]{'1', '2', '3'},
provider.getCredentialEntry("pass2").getCredential());
assertArrayEquals(passwd, provider.getCredentialEntry("pass").getCredential());
List<String> creds = provider.getAliases();
assertTrue("Credentials should have been returned.", creds.size() == 2);
assertTrue("Returned Credentials should have included pass.", creds.contains("pass"));
assertTrue("Returned Credentials should have included pass2.", creds.contains("pass2"));
}
@Test
public void testUserProvider() throws Exception {
Configuration conf = new Configuration();
final String ourUrl = UserProvider.SCHEME_NAME + ":///";
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
checkSpecificProvider(conf, ourUrl);
// see if the credentials are actually in the UGI
Credentials credentials =
UserGroupInformation.getCurrentUser().getCredentials();
assertArrayEquals(new byte[]{'1', '2', '3'},
credentials.getSecretKey(new Text("pass2")));
}
@Test
public void testJksProvider() throws Exception {
Configuration conf = new Configuration();
final Path jksPath = new Path(tmpDir.toString(), "test.jks");
final String ourUrl =
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
File file = new File(tmpDir, "test.jks");
file.delete();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
checkSpecificProvider(conf, ourUrl);
Path path = ProviderUtils.unnestUri(new URI(ourUrl));
FileSystem fs = path.getFileSystem(conf);
FileStatus s = fs.getFileStatus(path);
assertTrue(s.getPermission().toString().equals("rwx------"));
assertTrue(file + " should exist", file.isFile());
// check permission retention after explicit change
fs.setPermission(path, new FsPermission("777"));
checkPermissionRetention(conf, ourUrl, path);
}
@Test
public void testLocalJksProvider() throws Exception {
Configuration conf = new Configuration();
final Path jksPath = new Path(tmpDir.toString(), "test.jks");
final String ourUrl =
LocalJavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
File file = new File(tmpDir, "test.jks");
file.delete();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
checkSpecificProvider(conf, ourUrl);
Path path = ProviderUtils.unnestUri(new URI(ourUrl));
FileSystem fs = path.getFileSystem(conf);
FileStatus s = fs.getFileStatus(path);
assertTrue("Unexpected permissions: " + s.getPermission().toString(), s.getPermission().toString().equals("rwx------"));
assertTrue(file + " should exist", file.isFile());
// check permission retention after explicit change
fs.setPermission(path, new FsPermission("777"));
checkPermissionRetention(conf, ourUrl, path);
}
public void checkPermissionRetention(Configuration conf, String ourUrl,
Path path) throws Exception {
CredentialProvider provider = CredentialProviderFactory.getProviders(conf).get(0);
// let's add a new credential and flush and check that permissions are still set to 777
char[] cred = new char[32];
    for (int i = 0; i < cred.length; ++i) {
cred[i] = (char) i;
}
// create a new key
try {
provider.createCredentialEntry("key5", cred);
} catch (Exception e) {
e.printStackTrace();
throw e;
}
provider.flush();
// get a new instance of the provider to ensure it was saved correctly
provider = CredentialProviderFactory.getProviders(conf).get(0);
assertArrayEquals(cred, provider.getCredentialEntry("key5").getCredential());
FileSystem fs = path.getFileSystem(conf);
FileStatus s = fs.getFileStatus(path);
assertTrue("Permissions should have been retained from the preexisting " +
"keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
}
}
| 10,820 | 38.492701 | 124 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.alias;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.junit.Before;
import org.junit.Test;
public class TestCredShell {
private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
private final ByteArrayOutputStream errContent = new ByteArrayOutputStream();
private static final File tmpDir =
new File(System.getProperty("test.build.data", "/tmp"), "creds");
/* The default JCEKS provider - for testing purposes */
private String jceksProvider;
@Before
public void setup() throws Exception {
System.setOut(new PrintStream(outContent));
System.setErr(new PrintStream(errContent));
final Path jksPath = new Path(tmpDir.toString(), "keystore.jceks");
new File(jksPath.toString()).delete();
jceksProvider = "jceks://file" + jksPath.toUri();
}
@Test
public void testCredentialSuccessfulLifecycle() throws Exception {
outContent.reset();
String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider",
jceksProvider};
int rc = 0;
CredentialShell cs = new CredentialShell();
cs.setConf(new Configuration());
rc = cs.run(args1);
assertEquals(outContent.toString(), 0, rc);
assertTrue(outContent.toString().contains("credential1 has been successfully " +
"created."));
outContent.reset();
String[] args2 = {"list", "-provider",
jceksProvider};
rc = cs.run(args2);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("credential1"));
outContent.reset();
String[] args4 = {"delete", "credential1", "-f", "-provider",
jceksProvider};
rc = cs.run(args4);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("credential1 has been successfully " +
"deleted."));
outContent.reset();
String[] args5 = {"list", "-provider",
jceksProvider};
rc = cs.run(args5);
assertEquals(0, rc);
assertFalse(outContent.toString(), outContent.toString().contains("credential1"));
}
@Test
public void testInvalidProvider() throws Exception {
String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider",
"sdff://file/tmp/credstore.jceks"};
int rc = 0;
CredentialShell cs = new CredentialShell();
cs.setConf(new Configuration());
rc = cs.run(args1);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("There are no valid " +
"CredentialProviders configured."));
}
@Test
public void testTransientProviderWarning() throws Exception {
String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider",
"user:///"};
int rc = 0;
CredentialShell cs = new CredentialShell();
cs.setConf(new Configuration());
rc = cs.run(args1);
assertEquals(outContent.toString(), 0, rc);
assertTrue(outContent.toString().contains("WARNING: you are modifying a " +
"transient provider."));
String[] args2 = {"delete", "credential1", "-f", "-provider", "user:///"};
rc = cs.run(args2);
assertEquals(outContent.toString(), 0, rc);
assertTrue(outContent.toString().contains("credential1 has been successfully " +
"deleted."));
}
@Test
public void testTransientProviderOnlyConfig() throws Exception {
String[] args1 = {"create", "credential1"};
int rc = 0;
CredentialShell cs = new CredentialShell();
Configuration config = new Configuration();
config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "user:///");
cs.setConf(config);
rc = cs.run(args1);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("There are no valid " +
"CredentialProviders configured."));
}
@Test
public void testPromptForCredentialWithEmptyPasswd() throws Exception {
String[] args1 = {"create", "credential1", "-provider",
jceksProvider};
ArrayList<String> passwords = new ArrayList<String>();
passwords.add(null);
passwords.add("p@ssw0rd");
int rc = 0;
CredentialShell shell = new CredentialShell();
shell.setConf(new Configuration());
shell.setPasswordReader(new MockPasswordReader(passwords));
rc = shell.run(args1);
assertEquals(outContent.toString(), 1, rc);
assertTrue(outContent.toString().contains("Passwords don't match"));
}
@Test
public void testPromptForCredential() throws Exception {
String[] args1 = {"create", "credential1", "-provider",
jceksProvider};
ArrayList<String> passwords = new ArrayList<String>();
passwords.add("p@ssw0rd");
passwords.add("p@ssw0rd");
int rc = 0;
CredentialShell shell = new CredentialShell();
shell.setConf(new Configuration());
shell.setPasswordReader(new MockPasswordReader(passwords));
rc = shell.run(args1);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("credential1 has been successfully " +
"created."));
String[] args2 = {"delete", "credential1", "-f", "-provider",
jceksProvider};
rc = shell.run(args2);
assertEquals(0, rc);
assertTrue(outContent.toString().contains("credential1 has been successfully " +
"deleted."));
}
public class MockPasswordReader extends CredentialShell.PasswordReader {
List<String> passwords = null;
public MockPasswordReader(List<String> passwds) {
passwords = passwds;
}
@Override
public char[] readPassword(String prompt) {
if (passwords.size() == 0) return null;
String pass = passwords.remove(0);
return pass == null ? null : pass.toCharArray();
}
@Override
public void format(String message) {
System.out.println(message);
}
}
@Test
public void testEmptyArgList() throws Exception {
CredentialShell shell = new CredentialShell();
shell.setConf(new Configuration());
assertEquals(1, shell.init(new String[0]));
}
@Test
public void testCommandHelpExitsNormally() throws Exception {
for (String cmd : Arrays.asList("create", "list", "delete")) {
CredentialShell shell = new CredentialShell();
shell.setConf(new Configuration());
assertEquals("Expected help argument on " + cmd + " to return 0",
0, shell.init(new String[] {cmd, "-help"}));
}
}
@Test
public void testEmptyArgForCommands() throws Exception {
CredentialShell shell = new CredentialShell();
String[] command = { "list", "-provider" };
assertEquals("Expected empty argument on " + command + " to return 1", 1,
shell.init(command));
for (String cmd : Arrays.asList("create", "delete")) {
shell.setConf(new Configuration());
assertEquals("Expected empty argument on " + cmd + " to return 1", 1,
shell.init(new String[] { cmd }));
}
}
}
| 7,953 | 33.4329 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token;
import java.io.*;
import java.util.Arrays;
import org.apache.hadoop.io.*;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
import junit.framework.TestCase;
/** Unit tests for Token */
public class TestToken extends TestCase {
static boolean isEqual(Object a, Object b) {
return a == null ? b == null : a.equals(b);
}
static boolean checkEqual(Token<TokenIdentifier> a, Token<TokenIdentifier> b) {
return Arrays.equals(a.getIdentifier(), b.getIdentifier())
&& Arrays.equals(a.getPassword(), b.getPassword())
&& isEqual(a.getKind(), b.getKind())
&& isEqual(a.getService(), b.getService());
}
/**
* Test token serialization
*/
public void testTokenSerialization() throws IOException {
// Get a token
Token<TokenIdentifier> sourceToken = new Token<TokenIdentifier>();
sourceToken.setService(new Text("service"));
// Write it to an output buffer
DataOutputBuffer out = new DataOutputBuffer();
sourceToken.write(out);
// Read the token back
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
Token<TokenIdentifier> destToken = new Token<TokenIdentifier>();
destToken.readFields(in);
assertTrue(checkEqual(sourceToken, destToken));
}
private static void checkUrlSafe(String str) throws Exception {
int len = str.length();
for(int i=0; i < len; ++i) {
char ch = str.charAt(i);
if (ch == '-') continue;
if (ch == '_') continue;
if (ch >= '0' && ch <= '9') continue;
if (ch >= 'A' && ch <= 'Z') continue;
if (ch >= 'a' && ch <= 'z') continue;
fail("Encoded string " + str +
" has invalid character at position " + i);
}
}
public static void testEncodeWritable() throws Exception {
String[] values = new String[]{"", "a", "bb", "ccc", "dddd", "eeeee",
"ffffff", "ggggggg", "hhhhhhhh", "iiiiiiiii",
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLM" +
"NOPQRSTUVWXYZ01234567890!@#$%^&*()-=_+[]{}|;':,./<>?"};
Token<AbstractDelegationTokenIdentifier> orig;
Token<AbstractDelegationTokenIdentifier> copy =
new Token<AbstractDelegationTokenIdentifier>();
// ensure that for each string the input and output values match
for(int i=0; i< values.length; ++i) {
String val = values[i];
System.out.println("Input = " + val);
orig = new Token<AbstractDelegationTokenIdentifier>(val.getBytes(),
val.getBytes(), new Text(val), new Text(val));
String encode = orig.encodeToUrlString();
copy.decodeFromUrlString(encode);
assertEquals(orig, copy);
checkUrlSafe(encode);
}
}
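// A minimal sketch (a hypothetical helper, not part of the original suite)
// of the round trip that testEncodeWritable exercises by hand. Since
// encodeToUrlString() emits only [-_0-9A-Za-z] characters (as checkUrlSafe
// verifies), the encoded form can be embedded directly in a URL.
private static Token<AbstractDelegationTokenIdentifier> urlRoundTrip(
Token<AbstractDelegationTokenIdentifier> orig) throws IOException {
String encoded = orig.encodeToUrlString(); // URL-safe base64 form
Token<AbstractDelegationTokenIdentifier> copy =
new Token<AbstractDelegationTokenIdentifier>();
copy.decodeFromUrlString(encoded); // restores identifier, password,
return copy; // kind and service
}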
public void testDecodeIdentifier() throws IOException {
TestDelegationTokenSecretManager secretManager =
new TestDelegationTokenSecretManager(0, 0, 0, 0);
secretManager.startThreads();
TestDelegationTokenIdentifier id = new TestDelegationTokenIdentifier(
new Text("owner"), new Text("renewer"), new Text("realUser"));
Token<TestDelegationTokenIdentifier> token =
new Token<TestDelegationTokenIdentifier>(id, secretManager);
TokenIdentifier idCopy = token.decodeIdentifier();
assertNotSame(id, idCopy);
assertEquals(id, idCopy);
}
}
| 4,365 | 36.637931 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation;
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestDelegationToken {
private static final Log LOG = LogFactory.getLog(TestDelegationToken.class);
private static final Text KIND = new Text("MY KIND");
public static class TestDelegationTokenIdentifier
extends AbstractDelegationTokenIdentifier
implements Writable {
public TestDelegationTokenIdentifier() {
}
public TestDelegationTokenIdentifier(Text owner, Text renewer, Text realUser) {
super(owner, renewer, realUser);
}
@Override
public Text getKind() {
return KIND;
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
}
}
public static class TestDelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<TestDelegationTokenIdentifier> {
public boolean isStoreNewMasterKeyCalled = false;
public boolean isRemoveStoredMasterKeyCalled = false;
public boolean isStoreNewTokenCalled = false;
public boolean isRemoveStoredTokenCalled = false;
public boolean isUpdateStoredTokenCalled = false;
public TestDelegationTokenSecretManager(long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
}
@Override
public TestDelegationTokenIdentifier createIdentifier() {
return new TestDelegationTokenIdentifier();
}
@Override
protected byte[] createPassword(TestDelegationTokenIdentifier t) {
return super.createPassword(t);
}
@Override
protected void storeNewMasterKey(DelegationKey key) throws IOException {
isStoreNewMasterKeyCalled = true;
super.storeNewMasterKey(key);
}
@Override
protected void removeStoredMasterKey(DelegationKey key) {
isRemoveStoredMasterKeyCalled = true;
Assert.assertFalse(key.equals(allKeys.get(currentId)));
}
@Override
protected void storeNewToken(TestDelegationTokenIdentifier ident,
long renewDate) throws IOException {
super.storeNewToken(ident, renewDate);
isStoreNewTokenCalled = true;
}
@Override
protected void removeStoredToken(TestDelegationTokenIdentifier ident)
throws IOException {
super.removeStoredToken(ident);
isRemoveStoredTokenCalled = true;
}
@Override
protected void updateStoredToken(TestDelegationTokenIdentifier ident,
long renewDate) throws IOException {
super.updateStoredToken(ident, renewDate);
isUpdateStoredTokenCalled = true;
}
public byte[] createPassword(TestDelegationTokenIdentifier t, DelegationKey key) {
return SecretManager.createPassword(t.getBytes(), key.getKey());
}
public Map<TestDelegationTokenIdentifier, DelegationTokenInformation> getAllTokens() {
return currentTokens;
}
public DelegationKey getKey(TestDelegationTokenIdentifier id) {
return allKeys.get(id.getMasterKeyId());
}
}
public static class TokenSelector extends
AbstractDelegationTokenSelector<TestDelegationTokenIdentifier>{
protected TokenSelector() {
super(KIND);
}
}
@Test
public void testSerialization() throws Exception {
TestDelegationTokenIdentifier origToken = new
TestDelegationTokenIdentifier(new Text("alice"),
new Text("bob"),
new Text("colin"));
TestDelegationTokenIdentifier newToken = new TestDelegationTokenIdentifier();
origToken.setIssueDate(123);
origToken.setMasterKeyId(321);
origToken.setMaxDate(314);
origToken.setSequenceNumber(12345);
// clone origToken into newToken
DataInputBuffer inBuf = new DataInputBuffer();
DataOutputBuffer outBuf = new DataOutputBuffer();
origToken.write(outBuf);
inBuf.reset(outBuf.getData(), 0, outBuf.getLength());
newToken.readFields(inBuf);
// now test the fields
assertEquals("alice", newToken.getUser().getUserName());
assertEquals(new Text("bob"), newToken.getRenewer());
assertEquals("colin", newToken.getUser().getRealUser().getUserName());
assertEquals(123, newToken.getIssueDate());
assertEquals(321, newToken.getMasterKeyId());
assertEquals(314, newToken.getMaxDate());
assertEquals(12345, newToken.getSequenceNumber());
assertEquals(origToken, newToken);
}
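// A generic sketch (hypothetical helper, not used by the tests below) of the
// Writable "clone via buffers" idiom that testSerialization applies inline:
// any Writable can be copied by serializing into a DataOutputBuffer and
// deserializing from a DataInputBuffer wrapped around the same bytes.
private static void copyWritable(Writable src, Writable dst)
throws IOException {
DataOutputBuffer out = new DataOutputBuffer();
src.write(out); // serialize the source
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), 0, out.getLength()); // wrap the raw bytes
dst.readFields(in); // populate the destination
}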
private Token<TestDelegationTokenIdentifier> generateDelegationToken(
TestDelegationTokenSecretManager dtSecretManager,
String owner, String renewer) {
TestDelegationTokenIdentifier dtId =
new TestDelegationTokenIdentifier(new Text(
owner), new Text(renewer), null);
return new Token<TestDelegationTokenIdentifier>(dtId, dtSecretManager);
}
private void shouldThrow(PrivilegedExceptionAction<Object> action,
Class<? extends Throwable> except) {
try {
action.run();
Assert.fail("action did not throw " + except);
} catch (Throwable th) {
LOG.info("Caught an exception: ", th);
assertEquals("action threw wrong exception", except, th.getClass());
}
}
@Test
public void testGetUserNullOwner() {
TestDelegationTokenIdentifier ident =
new TestDelegationTokenIdentifier(null, null, null);
UserGroupInformation ugi = ident.getUser();
assertNull(ugi);
}
@Test
public void testGetUserWithOwner() {
TestDelegationTokenIdentifier ident =
new TestDelegationTokenIdentifier(new Text("owner"), null, null);
UserGroupInformation ugi = ident.getUser();
assertNull(ugi.getRealUser());
assertEquals("owner", ugi.getUserName());
assertEquals(AuthenticationMethod.TOKEN, ugi.getAuthenticationMethod());
}
@Test
public void testGetUserWithOwnerEqualsReal() {
Text owner = new Text("owner");
TestDelegationTokenIdentifier ident =
new TestDelegationTokenIdentifier(owner, null, owner);
UserGroupInformation ugi = ident.getUser();
assertNull(ugi.getRealUser());
assertEquals("owner", ugi.getUserName());
assertEquals(AuthenticationMethod.TOKEN, ugi.getAuthenticationMethod());
}
@Test
public void testGetUserWithOwnerAndReal() {
Text owner = new Text("owner");
Text realUser = new Text("realUser");
TestDelegationTokenIdentifier ident =
new TestDelegationTokenIdentifier(owner, null, realUser);
UserGroupInformation ugi = ident.getUser();
assertNotNull(ugi.getRealUser());
assertNull(ugi.getRealUser().getRealUser());
assertEquals("owner", ugi.getUserName());
assertEquals("realUser", ugi.getRealUser().getUserName());
assertEquals(AuthenticationMethod.PROXY,
ugi.getAuthenticationMethod());
assertEquals(AuthenticationMethod.TOKEN,
ugi.getRealUser().getAuthenticationMethod());
}
@Test
public void testDelegationTokenSecretManager() throws Exception {
final TestDelegationTokenSecretManager dtSecretManager =
new TestDelegationTokenSecretManager(24*60*60*1000,
3*1000,1*1000,3600000);
try {
dtSecretManager.startThreads();
final Token<TestDelegationTokenIdentifier> token =
generateDelegationToken(
dtSecretManager, "SomeUser", "JobTracker");
Assert.assertTrue(dtSecretManager.isStoreNewTokenCalled);
// Fake renewer should not be able to renew
shouldThrow(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
dtSecretManager.renewToken(token, "FakeRenewer");
return null;
}
}, AccessControlException.class);
long time = dtSecretManager.renewToken(token, "JobTracker");
Assert.assertTrue(dtSecretManager.isUpdateStoredTokenCalled);
assertTrue("renew time is in future", time > Time.now());
TestDelegationTokenIdentifier identifier =
new TestDelegationTokenIdentifier();
byte[] tokenId = token.getIdentifier();
identifier.readFields(new DataInputStream(
new ByteArrayInputStream(tokenId)));
Assert.assertNotNull(dtSecretManager.retrievePassword(identifier));
LOG.info("Sleep to expire the token");
Thread.sleep(2000);
//Token should be expired
try {
dtSecretManager.retrievePassword(identifier);
//Should not come here
Assert.fail("Token should have expired");
} catch (InvalidToken e) {
//Success
}
dtSecretManager.renewToken(token, "JobTracker");
LOG.info("Sleep beyond the max lifetime");
Thread.sleep(2000);
shouldThrow(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
dtSecretManager.renewToken(token, "JobTracker");
return null;
}
}, InvalidToken.class);
} finally {
dtSecretManager.stopThreads();
}
}
@Test
public void testCancelDelegationToken() throws Exception {
final TestDelegationTokenSecretManager dtSecretManager =
new TestDelegationTokenSecretManager(24*60*60*1000,
10*1000,1*1000,3600000);
try {
dtSecretManager.startThreads();
final Token<TestDelegationTokenIdentifier> token =
generateDelegationToken(dtSecretManager, "SomeUser", "JobTracker");
//Fake renewer should not be able to renew
shouldThrow(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
dtSecretManager.renewToken(token, "FakeCanceller");
return null;
}
}, AccessControlException.class);
dtSecretManager.cancelToken(token, "JobTracker");
Assert.assertTrue(dtSecretManager.isRemoveStoredTokenCalled);
shouldThrow(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
dtSecretManager.renewToken(token, "JobTracker");
return null;
}
}, InvalidToken.class);
} finally {
dtSecretManager.stopThreads();
}
}
@Test(timeout = 10000)
public void testRollMasterKey() throws Exception {
TestDelegationTokenSecretManager dtSecretManager =
new TestDelegationTokenSecretManager(800,
800,1*1000,3600000);
try {
dtSecretManager.startThreads();
//generate a token and store the password
Token<TestDelegationTokenIdentifier> token = generateDelegationToken(
dtSecretManager, "SomeUser", "JobTracker");
byte[] oldPasswd = token.getPassword();
//store the length of the keys list
int prevNumKeys = dtSecretManager.getAllKeys().length;
dtSecretManager.rollMasterKey();
Assert.assertTrue(dtSecretManager.isStoreNewMasterKeyCalled);
//after rolling, the length of the keys list must increase
int currNumKeys = dtSecretManager.getAllKeys().length;
Assert.assertTrue(currNumKeys - prevNumKeys >= 1);
//after rolling, the token that was generated earlier must
//still be valid (retrievePassword will fail if the token
//is not valid)
ByteArrayInputStream bi =
new ByteArrayInputStream(token.getIdentifier());
TestDelegationTokenIdentifier identifier =
dtSecretManager.createIdentifier();
identifier.readFields(new DataInputStream(bi));
byte[] newPasswd =
dtSecretManager.retrievePassword(identifier);
//compare the passwords
Assert.assertEquals(oldPasswd, newPasswd);
// wait for keys to expire
while(!dtSecretManager.isRemoveStoredMasterKeyCalled) {
Thread.sleep(200);
}
} finally {
dtSecretManager.stopThreads();
}
}
@Test
@SuppressWarnings("unchecked")
public void testDelegationTokenSelector() throws Exception {
TestDelegationTokenSecretManager dtSecretManager =
new TestDelegationTokenSecretManager(24*60*60*1000,
10*1000,1*1000,3600000);
try {
dtSecretManager.startThreads();
AbstractDelegationTokenSelector ds =
new AbstractDelegationTokenSelector<TestDelegationTokenIdentifier>(KIND);
//Creates a collection of tokens
Token<TestDelegationTokenIdentifier> token1 = generateDelegationToken(
dtSecretManager, "SomeUser1", "JobTracker");
token1.setService(new Text("MY-SERVICE1"));
Token<TestDelegationTokenIdentifier> token2 = generateDelegationToken(
dtSecretManager, "SomeUser2", "JobTracker");
token2.setService(new Text("MY-SERVICE2"));
List<Token<TestDelegationTokenIdentifier>> tokens =
new ArrayList<Token<TestDelegationTokenIdentifier>>();
tokens.add(token1);
tokens.add(token2);
//try to select a token with a given service name (created earlier)
Token<TestDelegationTokenIdentifier> t =
ds.selectToken(new Text("MY-SERVICE1"), tokens);
Assert.assertEquals(t, token1);
} finally {
dtSecretManager.stopThreads();
}
}
@Test
public void testParallelDelegationTokenCreation() throws Exception {
final TestDelegationTokenSecretManager dtSecretManager =
new TestDelegationTokenSecretManager(2000, 24 * 60 * 60 * 1000,
7 * 24 * 60 * 60 * 1000, 2000);
try {
dtSecretManager.startThreads();
int numThreads = 100;
final int numTokensPerThread = 100;
class TokenIssuerThread implements Runnable {
@Override
public void run() {
for (int i = 0; i < numTokensPerThread; i++) {
generateDelegationToken(dtSecretManager, "auser", "arenewer");
try {
Thread.sleep(250);
} catch (Exception e) {
}
}
}
}
Thread[] issuers = new Thread[numThreads];
for (int i = 0; i < numThreads; i++) {
issuers[i] = new Daemon(new TokenIssuerThread());
issuers[i].start();
}
for (int i = 0; i < numThreads; i++) {
issuers[i].join();
}
Map<TestDelegationTokenIdentifier, DelegationTokenInformation> tokenCache = dtSecretManager
.getAllTokens();
Assert.assertEquals(numTokensPerThread*numThreads, tokenCache.size());
Iterator<TestDelegationTokenIdentifier> iter = tokenCache.keySet().iterator();
while (iter.hasNext()) {
TestDelegationTokenIdentifier id = iter.next();
DelegationTokenInformation info = tokenCache.get(id);
Assert.assertNotNull(info);
DelegationKey key = dtSecretManager.getKey(id);
Assert.assertNotNull(key);
byte[] storedPassword = dtSecretManager.retrievePassword(id);
byte[] password = dtSecretManager.createPassword(id, key);
Assert.assertTrue(Arrays.equals(password, storedPassword));
//verify by secret manager api
dtSecretManager.verifyToken(id, password);
}
} finally {
dtSecretManager.stopThreads();
}
}
@Test
public void testDelegationTokenNullRenewer() throws Exception {
TestDelegationTokenSecretManager dtSecretManager =
new TestDelegationTokenSecretManager(24*60*60*1000,
10*1000,1*1000,3600000);
dtSecretManager.startThreads();
TestDelegationTokenIdentifier dtId = new TestDelegationTokenIdentifier(new Text(
"theuser"), null, null);
Token<TestDelegationTokenIdentifier> token = new Token<TestDelegationTokenIdentifier>(
dtId, dtSecretManager);
Assert.assertNotNull(token);
try {
dtSecretManager.renewToken(token, "");
Assert.fail("Renewal must not succeed");
} catch (IOException e) {
//PASS
}
}
private boolean testDelegationTokenIdentifierSerializationRoundTrip(
Text owner, Text renewer, Text realUser) throws IOException {
TestDelegationTokenIdentifier dtid = new TestDelegationTokenIdentifier(
owner, renewer, realUser);
DataOutputBuffer out = new DataOutputBuffer();
dtid.writeImpl(out);
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
try {
TestDelegationTokenIdentifier dtid2 =
new TestDelegationTokenIdentifier();
dtid2.readFields(in);
assertTrue(dtid.equals(dtid2));
return true;
} catch(IOException e){
return false;
}
}
@Test
public void testSimpleDtidSerialization() throws IOException {
assertTrue(testDelegationTokenIdentifierSerializationRoundTrip(
new Text("owner"), new Text("renewer"), new Text("realUser")));
assertTrue(testDelegationTokenIdentifierSerializationRoundTrip(
new Text(""), new Text(""), new Text("")));
assertTrue(testDelegationTokenIdentifierSerializationRoundTrip(
new Text(""), new Text("b"), new Text("")));
}
@Test
public void testOverlongDtidSerialization() throws IOException {
byte[] bigBuf = new byte[Text.DEFAULT_MAX_LEN + 1];
for (int i = 0; i < bigBuf.length; i++) {
bigBuf[i] = 0;
}
assertFalse(testDelegationTokenIdentifierSerializationRoundTrip(
new Text(bigBuf), new Text("renewer"), new Text("realUser")));
assertFalse(testDelegationTokenIdentifierSerializationRoundTrip(
new Text("owner"), new Text(bigBuf), new Text("realUser")));
assertFalse(testDelegationTokenIdentifierSerializationRoundTrip(
new Text("owner"), new Text("renewer"), new Text(bigBuf)));
}
@Test
public void testDelegationKeyEqualAndHash() {
DelegationKey key1 = new DelegationKey(1111, 2222, "keyBytes".getBytes());
DelegationKey key2 = new DelegationKey(1111, 2222, "keyBytes".getBytes());
DelegationKey key3 = new DelegationKey(3333, 2222, "keyBytes".getBytes());
Assert.assertEquals(key1, key2);
Assert.assertFalse(key2.equals(key3));
}
@Test
public void testEmptyToken() throws IOException {
Token<?> token1 = new Token<TokenIdentifier>();
Token<?> token2 = new Token<TokenIdentifier>(new byte[0], new byte[0],
new Text(), new Text());
assertEquals(token1, token2);
assertEquals(token1.encodeToUrlString(), token2.encodeToUrlString());
token2 = new Token<TokenIdentifier>(null, null, null, null);
assertEquals(token1, token2);
assertEquals(token1.encodeToUrlString(), token2.encodeToUrlString());
}
}
| 20,769 | 36.222222 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.ACLProvider;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import static org.junit.Assert.fail;
import org.junit.Test;
public class TestZKDelegationTokenSecretManager {
private static final int TEST_RETRIES = 2;
private static final int RETRY_COUNT = 5;
private static final int RETRY_WAIT = 1000;
private static final long DAY_IN_SECS = 86400;
private TestingServer zkServer;
@Before
public void setup() throws Exception {
zkServer = new TestingServer();
zkServer.start();
}
@After
public void tearDown() throws Exception {
if (zkServer != null) {
zkServer.close();
}
}
protected Configuration getSecretConf(String connectString) {
Configuration conf = new Configuration();
conf.setBoolean(DelegationTokenManager.ENABLE_ZK_KEY, true);
conf.set(ZKDelegationTokenSecretManager.ZK_DTSM_ZK_CONNECTION_STRING, connectString);
conf.set(ZKDelegationTokenSecretManager.ZK_DTSM_ZNODE_WORKING_PATH, "testPath");
conf.set(ZKDelegationTokenSecretManager.ZK_DTSM_ZK_AUTH_TYPE, "none");
conf.setLong(ZKDelegationTokenSecretManager.ZK_DTSM_ZK_SHUTDOWN_TIMEOUT, 100);
conf.setLong(DelegationTokenManager.UPDATE_INTERVAL, DAY_IN_SECS);
conf.setLong(DelegationTokenManager.MAX_LIFETIME, DAY_IN_SECS);
conf.setLong(DelegationTokenManager.RENEW_INTERVAL, DAY_IN_SECS);
conf.setLong(DelegationTokenManager.REMOVAL_SCAN_INTERVAL, DAY_IN_SECS);
return conf;
}
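// Note (an assumption inferred from the names DAY_IN_SECS above and
// updateIntervalSeconds in testStopThreads, not verified against
// DelegationTokenManager itself): these interval settings appear to be
// interpreted as seconds rather than milliseconds.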
@SuppressWarnings("unchecked")
@Test
public void testMultiNodeOperations() throws Exception {
for (int i = 0; i < TEST_RETRIES; i++) {
DelegationTokenManager tm1, tm2 = null;
String connectString = zkServer.getConnectString();
Configuration conf = getSecretConf(connectString);
tm1 = new DelegationTokenManager(conf, new Text("bla"));
tm1.init();
tm2 = new DelegationTokenManager(conf, new Text("bla"));
tm2.init();
Token<DelegationTokenIdentifier> token =
(Token<DelegationTokenIdentifier>) tm1.createToken(
UserGroupInformation.getCurrentUser(), "foo");
Assert.assertNotNull(token);
tm2.verifyToken(token);
tm2.renewToken(token, "foo");
tm1.verifyToken(token);
tm1.cancelToken(token, "foo");
try {
verifyTokenFail(tm2, token);
fail("Expected InvalidToken");
} catch (SecretManager.InvalidToken it) {
// Ignore
}
token = (Token<DelegationTokenIdentifier>) tm2.createToken(
UserGroupInformation.getCurrentUser(), "bar");
Assert.assertNotNull(token);
tm1.verifyToken(token);
tm1.renewToken(token, "bar");
tm2.verifyToken(token);
tm2.cancelToken(token, "bar");
try {
verifyTokenFail(tm1, token);
fail("Expected InvalidToken");
} catch (SecretManager.InvalidToken it) {
// Ignore
}
verifyDestroy(tm1, conf);
verifyDestroy(tm2, conf);
}
}
@SuppressWarnings("unchecked")
@Test
public void testNodeUpAfterAWhile() throws Exception {
for (int i = 0; i < TEST_RETRIES; i++) {
String connectString = zkServer.getConnectString();
Configuration conf = getSecretConf(connectString);
DelegationTokenManager tm1 = new DelegationTokenManager(conf, new Text("bla"));
tm1.init();
Token<DelegationTokenIdentifier> token1 =
(Token<DelegationTokenIdentifier>) tm1.createToken(
UserGroupInformation.getCurrentUser(), "foo");
Assert.assertNotNull(token1);
Token<DelegationTokenIdentifier> token2 =
(Token<DelegationTokenIdentifier>) tm1.createToken(
UserGroupInformation.getCurrentUser(), "bar");
Assert.assertNotNull(token2);
Token<DelegationTokenIdentifier> token3 =
(Token<DelegationTokenIdentifier>) tm1.createToken(
UserGroupInformation.getCurrentUser(), "boo");
Assert.assertNotNull(token3);
tm1.verifyToken(token1);
tm1.verifyToken(token2);
tm1.verifyToken(token3);
// Cancel one token
tm1.cancelToken(token1, "foo");
// Start the second node after some time.
Thread.sleep(1000);
DelegationTokenManager tm2 = new DelegationTokenManager(conf, new Text("bla"));
tm2.init();
tm2.verifyToken(token2);
tm2.verifyToken(token3);
try {
verifyTokenFail(tm2, token1);
fail("Expected InvalidToken");
} catch (SecretManager.InvalidToken it) {
// Ignore
}
// Create a new token through the new ZKDTSM
Token<DelegationTokenIdentifier> token4 =
(Token<DelegationTokenIdentifier>) tm2.createToken(
UserGroupInformation.getCurrentUser(), "xyz");
Assert.assertNotNull(token4);
tm2.verifyToken(token4);
tm1.verifyToken(token4);
// Bring down tm2
verifyDestroy(tm2, conf);
// Start the third node after some time.
Thread.sleep(1000);
DelegationTokenManager tm3 = new DelegationTokenManager(conf, new Text("bla"));
tm3.init();
tm3.verifyToken(token2);
tm3.verifyToken(token3);
tm3.verifyToken(token4);
try {
verifyTokenFail(tm3, token1);
fail("Expected InvalidToken");
} catch (SecretManager.InvalidToken it) {
// Ignore
}
verifyDestroy(tm3, conf);
verifyDestroy(tm1, conf);
}
}
@SuppressWarnings("unchecked")
@Test
public void testRenewTokenSingleManager() throws Exception {
for (int i = 0; i < TEST_RETRIES; i++) {
DelegationTokenManager tm1 = null;
String connectString = zkServer.getConnectString();
Configuration conf = getSecretConf(connectString);
tm1 = new DelegationTokenManager(conf, new Text("foo"));
tm1.init();
Token<DelegationTokenIdentifier> token =
(Token<DelegationTokenIdentifier>)
tm1.createToken(UserGroupInformation.getCurrentUser(), "foo");
Assert.assertNotNull(token);
tm1.renewToken(token, "foo");
tm1.verifyToken(token);
verifyDestroy(tm1, conf);
}
}
@SuppressWarnings("unchecked")
@Test
public void testCancelTokenSingleManager() throws Exception {
for (int i = 0; i < TEST_RETRIES; i++) {
DelegationTokenManager tm1 = null;
String connectString = zkServer.getConnectString();
Configuration conf = getSecretConf(connectString);
tm1 = new DelegationTokenManager(conf, new Text("foo"));
tm1.init();
Token<DelegationTokenIdentifier> token =
(Token<DelegationTokenIdentifier>)
tm1.createToken(UserGroupInformation.getCurrentUser(), "foo");
Assert.assertNotNull(token);
tm1.cancelToken(token, "foo");
try {
verifyTokenFail(tm1, token);
fail("Expected InvalidToken");
} catch (SecretManager.InvalidToken it) {
it.printStackTrace();
}
verifyDestroy(tm1, conf);
}
}
@SuppressWarnings("rawtypes")
protected void verifyDestroy(DelegationTokenManager tm, Configuration conf)
throws Exception {
AbstractDelegationTokenSecretManager sm =
tm.getDelegationTokenSecretManager();
ZKDelegationTokenSecretManager zksm = (ZKDelegationTokenSecretManager) sm;
ExecutorService es = zksm.getListenerThreadPool();
tm.destroy();
Assert.assertTrue(es.isShutdown());
// wait for the pool to terminate
long timeout =
conf.getLong(
ZKDelegationTokenSecretManager.ZK_DTSM_ZK_SHUTDOWN_TIMEOUT,
ZKDelegationTokenSecretManager.ZK_DTSM_ZK_SHUTDOWN_TIMEOUT_DEFAULT);
Thread.sleep(timeout * 3);
Assert.assertTrue(es.isTerminated());
}
@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testStopThreads() throws Exception {
DelegationTokenManager tm1 = null;
String connectString = zkServer.getConnectString();
// let's make the update interval short and the shutdown interval
// comparatively longer, so if the update thread runs after shutdown,
// it will cause an error.
final long updateIntervalSeconds = 1;
final long shutdownTimeoutMillis = updateIntervalSeconds * 1000 * 5;
Configuration conf = getSecretConf(connectString);
conf.setLong(DelegationTokenManager.UPDATE_INTERVAL, updateIntervalSeconds);
conf.setLong(DelegationTokenManager.REMOVAL_SCAN_INTERVAL, updateIntervalSeconds);
conf.setLong(DelegationTokenManager.RENEW_INTERVAL, updateIntervalSeconds);
conf.setLong(ZKDelegationTokenSecretManager.ZK_DTSM_ZK_SHUTDOWN_TIMEOUT, shutdownTimeoutMillis);
tm1 = new DelegationTokenManager(conf, new Text("foo"));
tm1.init();
Token<DelegationTokenIdentifier> token =
(Token<DelegationTokenIdentifier>)
tm1.createToken(UserGroupInformation.getCurrentUser(), "foo");
Assert.assertNotNull(token);
AbstractDelegationTokenSecretManager sm = tm1.getDelegationTokenSecretManager();
ZKDelegationTokenSecretManager zksm = (ZKDelegationTokenSecretManager)sm;
ExecutorService es = zksm.getListenerThreadPool();
es.submit(new Callable<Void>() {
public Void call() throws Exception {
Thread.sleep(shutdownTimeoutMillis * 2); // force this to be shutdownNow
return null;
}
});
tm1.destroy();
}
@Test
public void testACLs() throws Exception {
DelegationTokenManager tm1;
String connectString = zkServer.getConnectString();
Configuration conf = getSecretConf(connectString);
RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
String userPass = "myuser:mypass";
final ACL digestACL = new ACL(ZooDefs.Perms.ALL, new Id("digest",
DigestAuthenticationProvider.generateDigest(userPass)));
ACLProvider digestAclProvider = new ACLProvider() {
@Override
public List<ACL> getAclForPath(String path) { return getDefaultAcl(); }
@Override
public List<ACL> getDefaultAcl() {
List<ACL> ret = new ArrayList<ACL>();
ret.add(digestACL);
return ret;
}
};
CuratorFramework curatorFramework =
CuratorFrameworkFactory.builder()
.connectString(connectString)
.retryPolicy(retryPolicy)
.aclProvider(digestAclProvider)
.authorization("digest", userPass.getBytes("UTF-8"))
.build();
curatorFramework.start();
ZKDelegationTokenSecretManager.setCurator(curatorFramework);
tm1 = new DelegationTokenManager(conf, new Text("bla"));
tm1.init();
// check ACL
String workingPath = conf.get(ZKDelegationTokenSecretManager.ZK_DTSM_ZNODE_WORKING_PATH);
verifyACL(curatorFramework, "/" + workingPath, digestACL);
tm1.destroy();
ZKDelegationTokenSecretManager.setCurator(null);
curatorFramework.close();
}
private void verifyACL(CuratorFramework curatorFramework,
String path, ACL expectedACL) throws Exception {
List<ACL> acls = curatorFramework.getACL().forPath(path);
Assert.assertEquals(1, acls.size());
Assert.assertEquals(expectedACL, acls.get(0));
}
// Since a cancel-token message initiated by one node may take some time to
// reach another node, the second node could of course check ZK directly to
// see whether the token under verification has been cancelled, but that
// would mean making an RPC call for every verification request. Thus, the
// eventual-consistency trade-off is accepted here, and verification is
// retried (see verifyTokenFailWithRetry) until the cancellation propagates.
private void verifyTokenFail(DelegationTokenManager tm,
Token<DelegationTokenIdentifier> token) throws IOException,
InterruptedException {
verifyTokenFailWithRetry(tm, token, RETRY_COUNT);
}
private void verifyTokenFailWithRetry(DelegationTokenManager tm,
Token<DelegationTokenIdentifier> token, int retryCount)
throws IOException, InterruptedException {
try {
tm.verifyToken(token);
} catch (SecretManager.InvalidToken er) {
// Expected outcome: the cancellation has propagated to this manager.
throw er;
}
// Otherwise the token still verifies; give the cancel message time to
// propagate, then retry until we run out of attempts.
if (retryCount > 0) {
Thread.sleep(RETRY_WAIT);
verifyTokenFailWithRetry(tm, token, retryCount - 1);
}
}
}
| 13,951 | 35.145078 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mortbay.jetty.AbstractConnector;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.FilterHolder;
import org.mortbay.jetty.servlet.ServletHolder;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import javax.servlet.Filter;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.IOException;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.URL;
import java.security.Principal;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
public class TestWebDelegationToken {
private static final String OK_USER = "ok-user";
private static final String FAIL_USER = "fail-user";
private static final String FOO_USER = "foo";
private Server jetty;
public static class DummyAuthenticationHandler
implements AuthenticationHandler {
@Override
public String getType() {
return "dummy";
}
@Override
public void init(Properties config) throws ServletException {
}
@Override
public void destroy() {
}
@Override
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
return false;
}
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token = null;
if (request.getParameter("authenticated") != null) {
token = new AuthenticationToken(request.getParameter("authenticated"),
"U", "test");
} else {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, "dummy");
}
return token;
}
}
public static class DummyDelegationTokenAuthenticationHandler extends
DelegationTokenAuthenticationHandler {
public DummyDelegationTokenAuthenticationHandler() {
super(new DummyAuthenticationHandler());
}
@Override
public void init(Properties config) throws ServletException {
Properties conf = new Properties(config);
conf.setProperty(TOKEN_KIND, "token-kind");
initTokenManager(conf);
}
}
public static class AFilter extends DelegationTokenAuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE,
DummyDelegationTokenAuthenticationHandler.class.getName());
return conf;
}
}
public static class PingServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write("ping");
if (req.getHeader(DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER)
!= null) {
resp.setHeader("UsingHeader", "true");
}
if (req.getQueryString() != null &&
req.getQueryString().contains(
DelegationTokenAuthenticator.DELEGATION_PARAM + "=")) {
resp.setHeader("UsingQueryString", "true");
}
}
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
Writer writer = resp.getWriter();
writer.write("ping: ");
IOUtils.copy(req.getReader(), writer);
resp.setStatus(HttpServletResponse.SC_OK);
}
}
protected Server createJettyServer() {
try {
InetAddress localhost = InetAddress.getLocalHost();
ServerSocket ss = new ServerSocket(0, 50, localhost);
int port = ss.getLocalPort();
ss.close();
jetty = new Server(0);
jetty.getConnectors()[0].setHost("localhost");
jetty.getConnectors()[0].setPort(port);
return jetty;
} catch (Exception ex) {
throw new RuntimeException("Could not setup Jetty: " + ex.getMessage(),
ex);
}
}
protected String getJettyURL() {
Connector c = jetty.getConnectors()[0];
return "http://" + c.getHost() + ":" + c.getPort();
}
@Before
public void setUp() throws Exception {
// resetting hadoop security to simple
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration();
UserGroupInformation.setConfiguration(conf);
jetty = createJettyServer();
}
@After
public void cleanUp() throws Exception {
jetty.stop();
// resetting hadoop security to simple
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration();
UserGroupInformation.setConfiguration(conf);
}
protected Server getJetty() {
return jetty;
}
@Test
public void testRawHttpCalls() throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
context.addServlet(new ServletHolder(PingServlet.class), "/bar");
try {
jetty.start();
URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
// unauthenticated access to URL
HttpURLConnection conn = (HttpURLConnection) nonAuthURL.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
// authenticated access to URL
conn = (HttpURLConnection) authURL.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// unauthenticated access to get delegation token
URL url = new URL(nonAuthURL.toExternalForm() + "?op=GETDELEGATIONTOKEN");
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
// authenticated access to get delegation token
url = new URL(authURL.toExternalForm() +
"&op=GETDELEGATIONTOKEN&renewer=foo");
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
ObjectMapper mapper = new ObjectMapper();
Map map = mapper.readValue(conn.getInputStream(), Map.class);
String dt = (String) ((Map) map.get("Token")).get("urlString");
Assert.assertNotNull(dt);
// delegation token access to URL
url = new URL(nonAuthURL.toExternalForm() + "?delegation=" + dt);
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// delegation token and authenticated access to URL
url = new URL(authURL.toExternalForm() + "&delegation=" + dt);
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// renew delegation token, unauthenticated access to URL
url = new URL(nonAuthURL.toExternalForm() +
"?op=RENEWDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
// renew delegation token, authenticated access to URL
url = new URL(authURL.toExternalForm() +
"&op=RENEWDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// renew delegation token, authenticated access to URL, but not the renewer
url = new URL(getJettyURL() +
"/foo/bar?authenticated=bar&op=RENEWDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
conn.getResponseCode());
// cancel delegation token, unauthenticated access to URL
url = new URL(nonAuthURL.toExternalForm() +
"?op=CANCELDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// cancel the already-cancelled delegation token, unauthenticated access to URL
url = new URL(nonAuthURL.toExternalForm() +
"?op=CANCELDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
conn.getResponseCode());
// get new delegation token
url = new URL(authURL.toExternalForm() +
"&op=GETDELEGATIONTOKEN&renewer=foo");
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
mapper = new ObjectMapper();
map = mapper.readValue(conn.getInputStream(), Map.class);
dt = (String) ((Map) map.get("Token")).get("urlString");
Assert.assertNotNull(dt);
// cancel delegation token, authenticated access to URL
url = new URL(authURL.toExternalForm() +
"&op=CANCELDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
} finally {
jetty.stop();
}
}
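// Hedged helper sketch (hypothetical, not called by the tests): the raw
// GETDELEGATIONTOKEN request used above, collapsed into one method. The
// base URL is expected to already carry the handler-specific
// "authenticated=..." parameter.
private static String fetchDelegationToken(URL authedBase)
throws IOException {
URL url = new URL(authedBase.toExternalForm() +
"&op=GETDELEGATIONTOKEN&renewer=foo");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
// The response body is JSON of the form {"Token":{"urlString":"..."}}
Map map = new ObjectMapper().readValue(conn.getInputStream(), Map.class);
return (String) ((Map) map.get("Token")).get("urlString");
}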
@Test
public void testDelegationTokenAuthenticatorCallsWithHeader()
throws Exception {
testDelegationTokenAuthenticatorCalls(false);
}
@Test
public void testDelegationTokenAuthenticatorCallsWithQueryString()
throws Exception {
testDelegationTokenAuthenticatorCalls(true);
}
private void testDelegationTokenAuthenticatorCalls(final boolean useQS)
throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
context.addServlet(new ServletHolder(PingServlet.class), "/bar");
try {
jetty.start();
final URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
URL authURL2 = new URL(getJettyURL() + "/foo/bar?authenticated=bar");
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
final DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
aUrl.setUseQueryStringForDelegationToken(useQS);
try {
aUrl.getDelegationToken(nonAuthURL, token, FOO_USER);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
aUrl.getDelegationToken(authURL, token, FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("token-kind"),
token.getDelegationToken().getKind());
aUrl.renewDelegationToken(authURL, token);
try {
aUrl.renewDelegationToken(nonAuthURL, token);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
aUrl.getDelegationToken(authURL, token, FOO_USER);
try {
aUrl.renewDelegationToken(authURL2, token);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("403"));
}
aUrl.getDelegationToken(authURL, token, FOO_USER);
aUrl.cancelDelegationToken(authURL, token);
aUrl.getDelegationToken(authURL, token, FOO_USER);
aUrl.cancelDelegationToken(nonAuthURL, token);
aUrl.getDelegationToken(authURL, token, FOO_USER);
try {
aUrl.renewDelegationToken(nonAuthURL, token);
Assert.fail(); // the unauthenticated renewal must be rejected with 401
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
aUrl.getDelegationToken(authURL, token, "foo");
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
ugi.addToken(token.getDelegationToken());
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
HttpURLConnection conn = aUrl.openConnection(nonAuthURL, new DelegationTokenAuthenticatedURL.Token());
Assert.assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
if (useQS) {
Assert.assertNull(conn.getHeaderField("UsingHeader"));
Assert.assertNotNull(conn.getHeaderField("UsingQueryString"));
} else {
Assert.assertNotNull(conn.getHeaderField("UsingHeader"));
Assert.assertNull(conn.getHeaderField("UsingQueryString"));
}
return null;
}
});
} finally {
jetty.stop();
}
}
private static class DummyDelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
public DummyDelegationTokenSecretManager() {
super(10000, 10000, 10000, 10000);
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier(new Text("fooKind"));
}
}
@Test
public void testExternalDelegationTokenSecretManager() throws Exception {
DummyDelegationTokenSecretManager secretMgr
= new DummyDelegationTokenSecretManager();
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
context.addServlet(new ServletHolder(PingServlet.class), "/bar");
try {
secretMgr.startThreads();
context.setAttribute(DelegationTokenAuthenticationFilter.
DELEGATION_TOKEN_SECRET_MANAGER_ATTR, secretMgr);
jetty.start();
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
aUrl.getDelegationToken(authURL, token, FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("fooKind"),
token.getDelegationToken().getKind());
} finally {
jetty.stop();
secretMgr.stopThreads();
}
}
public static class NoDTFilter extends AuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE, PseudoAuthenticationHandler.TYPE);
return conf;
}
}
public static class NoDTHandlerDTAFilter
extends DelegationTokenAuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE, PseudoAuthenticationHandler.TYPE);
return conf;
}
}
public static class UserServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write(req.getUserPrincipal().getName());
}
}
@Test
public void testDelegationTokenAuthenticationURLWithNoDTFilter()
throws Exception {
testDelegationTokenAuthenticatedURLWithNoDT(NoDTFilter.class);
}
@Test
public void testDelegationTokenAuthenticationURLWithNoDTHandler()
throws Exception {
testDelegationTokenAuthenticatedURLWithNoDT(NoDTHandlerDTAFilter.class);
}
// We are also implicitly testing the KerberosDelegationTokenAuthenticator
// fallback here.
private void testDelegationTokenAuthenticatedURLWithNoDT(
Class<? extends Filter> filterClass) throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(filterClass), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
HttpURLConnection conn = aUrl.openConnection(url, token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(FOO_USER, ret.get(0));
try {
aUrl.getDelegationToken(url, token, FOO_USER);
Assert.fail();
} catch (AuthenticationException ex) {
Assert.assertTrue(ex.getMessage().contains(
"delegation token operation"));
}
return null;
}
});
} finally {
jetty.stop();
}
}
public static class PseudoDTAFilter
extends DelegationTokenAuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE,
PseudoDelegationTokenAuthenticationHandler.class.getName());
conf.setProperty(DelegationTokenAuthenticationHandler.TOKEN_KIND,
"token-kind");
return conf;
}
@Override
protected org.apache.hadoop.conf.Configuration getProxyuserConfiguration(
FilterConfig filterConfig) throws ServletException {
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration(false);
conf.set("proxyuser.foo.users", OK_USER);
conf.set("proxyuser.foo.hosts", "localhost");
return conf;
}
}
@Test
public void testFallbackToPseudoDelegationTokenAuthenticator()
throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
HttpURLConnection conn = aUrl.openConnection(url, token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(FOO_USER, ret.get(0));
aUrl.getDelegationToken(url, token, FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("token-kind"),
token.getDelegationToken().getKind());
return null;
}
});
} finally {
jetty.stop();
}
}
public static class KDTAFilter extends DelegationTokenAuthenticationFilter {
static String keytabFile;
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE,
KerberosDelegationTokenAuthenticationHandler.class.getName());
conf.setProperty(KerberosAuthenticationHandler.KEYTAB, keytabFile);
conf.setProperty(KerberosAuthenticationHandler.PRINCIPAL,
"HTTP/localhost");
conf.setProperty(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND,
"token-kind");
return conf;
}
@Override
protected org.apache.hadoop.conf.Configuration getProxyuserConfiguration(
FilterConfig filterConfig) throws ServletException {
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration(false);
conf.set("proxyuser.client.users", OK_USER);
conf.set("proxyuser.client.hosts", "127.0.0.1");
return conf;
}
}
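  // Programmatic JAAS configuration for a keytab-based Kerberos login,
  // mirroring what a krb5 login module stanza would declare in a jaas.conf
  // file; used below to authenticate test clients against the MiniKdc.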
private static class KerberosConfiguration extends Configuration {
private String principal;
private String keytab;
public KerberosConfiguration(String principal, String keytab) {
this.principal = principal;
this.keytab = keytab;
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
Map<String, String> options = new HashMap<String, String>();
options.put("principal", principal);
options.put("keyTab", keytab);
options.put("useKeyTab", "true");
options.put("storeKey", "true");
options.put("doNotPrompt", "true");
options.put("useTicketCache", "true");
options.put("renewTGT", "true");
options.put("refreshKrb5Config", "true");
options.put("isInitiator", "true");
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
options.put("ticketCache", ticketCache);
}
options.put("debug", "true");
return new AppConfigurationEntry[]{
new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options),};
}
}
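  // Logs in the given principal from a keytab via JAAS and runs the callable
  // inside Subject.doAs(), unwrapping any PrivilegedActionException so the
  // caller sees the original failure. Illustrative usage (as done below):
  //   doAsKerberosUser("client", keytabFile.getAbsolutePath(),
  //       new Callable<Void>() { ... });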
public static <T> T doAsKerberosUser(String principal, String keytab,
final Callable<T> callable) throws Exception {
LoginContext loginContext = null;
try {
Set<Principal> principals = new HashSet<Principal>();
principals.add(new KerberosPrincipal(principal));
Subject subject = new Subject(false, principals, new HashSet<Object>(),
new HashSet<Object>());
loginContext = new LoginContext("", subject, null,
new KerberosConfiguration(principal, keytab));
loginContext.login();
subject = loginContext.getSubject();
return Subject.doAs(subject, new PrivilegedExceptionAction<T>() {
@Override
public T run() throws Exception {
return callable.call();
}
});
} catch (PrivilegedActionException ex) {
throw ex.getException();
} finally {
if (loginContext != null) {
loginContext.logout();
}
}
}
@Test
public void testKerberosDelegationTokenAuthenticator() throws Exception {
testKerberosDelegationTokenAuthenticator(false);
}
@Test
public void testKerberosDelegationTokenAuthenticatorWithDoAs()
throws Exception {
testKerberosDelegationTokenAuthenticator(true);
}
private void testKerberosDelegationTokenAuthenticator(
final boolean doAs) throws Exception {
final String doAsUser = doAs ? OK_USER : null;
// setting hadoop security to kerberos
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
File testDir = new File("target/" + UUID.randomUUID().toString());
Assert.assertTrue(testDir.mkdirs());
MiniKdc kdc = new MiniKdc(MiniKdc.createConf(), testDir);
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
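    // have Jetty resolve remote addresses, presumably so the host-based
    // proxy-user checks in KDTAFilter behave deterministically in this test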
((AbstractConnector)jetty.getConnectors()[0]).setResolveNames(true);
context.addFilter(new FilterHolder(KDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
kdc.start();
File keytabFile = new File(testDir, "test.keytab");
kdc.createPrincipal(keytabFile, "client", "HTTP/localhost");
KDTAFilter.keytabFile = keytabFile.getAbsolutePath();
jetty.start();
final DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
final DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
final URL url = new URL(getJettyURL() + "/foo/bar");
try {
aUrl.getDelegationToken(url, token, FOO_USER, doAsUser);
Assert.fail();
} catch (AuthenticationException ex) {
Assert.assertTrue(ex.getMessage().contains("GSSException"));
}
doAsKerberosUser("client", keytabFile.getAbsolutePath(),
new Callable<Void>() {
@Override
public Void call() throws Exception {
aUrl.getDelegationToken(
url, token, doAs ? doAsUser : "client", doAsUser);
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("token-kind"),
token.getDelegationToken().getKind());
// Make sure the token belongs to the right owner
ByteArrayInputStream buf = new ByteArrayInputStream(
token.getDelegationToken().getIdentifier());
DataInputStream dis = new DataInputStream(buf);
DelegationTokenIdentifier id =
new DelegationTokenIdentifier(new Text("token-kind"));
id.readFields(dis);
dis.close();
Assert.assertEquals(
doAs ? new Text(OK_USER) : new Text("client"), id.getOwner());
if (doAs) {
Assert.assertEquals(new Text("client"), id.getRealUser());
}
aUrl.renewDelegationToken(url, token, doAsUser);
Assert.assertNotNull(token.getDelegationToken());
aUrl.getDelegationToken(url, token, FOO_USER, doAsUser);
Assert.assertNotNull(token.getDelegationToken());
try {
aUrl.renewDelegationToken(url, token, doAsUser);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("403"));
}
aUrl.getDelegationToken(url, token, FOO_USER, doAsUser);
aUrl.cancelDelegationToken(url, token, doAsUser);
Assert.assertNull(token.getDelegationToken());
return null;
}
});
} finally {
jetty.stop();
kdc.stop();
}
}
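  // Proxy-user behavior end to end: the doAs query parameter over raw HTTP
  // (case insensitive), doAs through the authenticated URL, rejection of an
  // unauthorized proxy user, and the fact that delegation-token-authenticated
  // requests ignore doAs.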
@Test
public void testProxyUser() throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
// proxyuser using raw HTTP, verifying doAs is case insensitive
String strUrl = String.format("%s?user.name=%s&doas=%s",
url.toExternalForm(), FOO_USER, OK_USER);
HttpURLConnection conn =
(HttpURLConnection) new URL(strUrl).openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(OK_USER, ret.get(0));
strUrl = String.format("%s?user.name=%s&DOAS=%s", url.toExternalForm(),
FOO_USER, OK_USER);
conn = (HttpURLConnection) new URL(strUrl).openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(OK_USER, ret.get(0));
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
// proxyuser using authentication handler authentication
HttpURLConnection conn = aUrl.openConnection(url, token, OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(OK_USER, ret.get(0));
// unauthorized proxy user using authentication handler authentication
conn = aUrl.openConnection(url, token, FAIL_USER);
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
conn.getResponseCode());
// proxy using delegation token authentication
aUrl.getDelegationToken(url, token, FOO_USER);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
ugi.addToken(token.getDelegationToken());
token = new DelegationTokenAuthenticatedURL.Token();
// requests using delegation token as auth do not honor doAs
conn = aUrl.openConnection(url, token, OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(FOO_USER, ret.get(0));
return null;
}
});
} finally {
jetty.stop();
}
}
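  // Servlet that reports the UGI seen by the server, including the real user
  // when the request was made through a proxy user.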
public static class UGIServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
UserGroupInformation ugi = HttpUserGroupInformation.get();
if (ugi != null) {
String ret = "remoteuser=" + req.getRemoteUser() + ":ugi=" +
ugi.getShortUserName();
if (ugi.getAuthenticationMethod() ==
UserGroupInformation.AuthenticationMethod.PROXY) {
ret = "realugi=" + ugi.getRealUser().getShortUserName() + ":" + ret;
}
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write(ret);
} else {
resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
}
}
}
@Test
public void testHttpUGI() throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UGIServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
// user foo
HttpURLConnection conn = aUrl.openConnection(url, token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals("remoteuser=" + FOO_USER+ ":ugi=" + FOO_USER,
ret.get(0));
// user ok-user via proxyuser foo
conn = aUrl.openConnection(url, token, OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals("realugi=" + FOO_USER +":remoteuser=" + OK_USER +
":ugi=" + OK_USER, ret.get(0));
return null;
}
});
} finally {
jetty.stop();
}
}
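  // Same as PseudoDTAFilter but restricts the foo proxy user to requests
  // originating from 127.0.0.1, so the IP-address-based host check is
  // exercised.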
public static class IpAddressBasedPseudoDTAFilter extends PseudoDTAFilter {
@Override
protected org.apache.hadoop.conf.Configuration getProxyuserConfiguration
(FilterConfig filterConfig) throws ServletException {
org.apache.hadoop.conf.Configuration configuration = super
.getProxyuserConfiguration(filterConfig);
configuration.set("proxyuser.foo.hosts", "127.0.0.1");
return configuration;
}
}
@Test
public void testIpaddressCheck() throws Exception {
final Server jetty = createJettyServer();
((AbstractConnector)jetty.getConnectors()[0]).setResolveNames(true);
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(IpAddressBasedPseudoDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UGIServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
// user ok-user via proxyuser foo
HttpURLConnection conn = aUrl.openConnection(url, token, OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals("realugi=" + FOO_USER +":remoteuser=" + OK_USER +
":ugi=" + OK_USER, ret.get(0));
return null;
}
});
} finally {
jetty.stop();
}
}
}
| 37,930 | 35.933788 | 121 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Map;
import java.util.Properties;
public class TestDelegationTokenAuthenticationHandlerWithMocks {
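  // Wraps a dummy AuthenticationHandler of type "T" that never authenticates
  // and always replies 401 with a "mock" WWW-Authenticate challenge, so the
  // tests exercise only the delegation token code paths.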
public static class MockDelegationTokenAuthenticationHandler
extends DelegationTokenAuthenticationHandler {
public MockDelegationTokenAuthenticationHandler() {
super(new AuthenticationHandler() {
@Override
public String getType() {
return "T";
}
@Override
public void init(Properties config) throws ServletException {
}
@Override
public void destroy() {
}
@Override
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
return false;
}
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, "mock");
return null;
}
});
}
}
private DelegationTokenAuthenticationHandler handler;
@Before
public void setUp() throws Exception {
Properties conf = new Properties();
conf.put(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND, "foo");
handler = new MockDelegationTokenAuthenticationHandler();
handler.initTokenManager(conf);
}
@After
public void cleanUp() {
handler.destroy();
}
@Test
public void testManagementOperations() throws Exception {
testNonManagementOperation();
testManagementOperationErrors();
testGetToken(null, new Text("foo"));
testGetToken("bar", new Text("foo"));
testCancelToken();
testRenewToken();
}
private void testNonManagementOperation() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getParameter(
DelegationTokenAuthenticator.OP_PARAM)).thenReturn(null);
Assert.assertTrue(handler.managementOperation(null, request, null));
Mockito.when(request.getParameter(
DelegationTokenAuthenticator.OP_PARAM)).thenReturn("CREATE");
Assert.assertTrue(handler.managementOperation(null, request, null));
}
private void testManagementOperationErrors() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.OP_PARAM + "=" +
DelegationTokenAuthenticator.DelegationTokenOperation.
GETDELEGATIONTOKEN.toString()
);
Mockito.when(request.getMethod()).thenReturn("FOO");
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.startsWith("Wrong HTTP method"));
Mockito.reset(response);
Mockito.when(request.getMethod()).thenReturn(
DelegationTokenAuthenticator.DelegationTokenOperation.
GETDELEGATIONTOKEN.getHttpMethod()
);
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED));
Mockito.verify(response).setHeader(
Mockito.eq(KerberosAuthenticator.WWW_AUTHENTICATE),
Mockito.eq("mock"));
}
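  // Issues a GETDELEGATIONTOKEN request (first without, then with the given
  // renewer), parses the JSON response, decodes the token from its URL-safe
  // string form and checks its kind against the expected value.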
private void testGetToken(String renewer, Text expectedTokenKind)
throws Exception {
DelegationTokenAuthenticator.DelegationTokenOperation op =
DelegationTokenAuthenticator.DelegationTokenOperation.
GETDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
Mockito.when(request.getMethod()).thenReturn(op.getHttpMethod());
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Mockito.when(response.getWriter()).thenReturn(new PrintWriter(
new StringWriter()));
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() +
"&" + DelegationTokenAuthenticator.RENEWER_PARAM + "=" + renewer);
Mockito.reset(response);
Mockito.reset(token);
Mockito.when(token.getUserName()).thenReturn("user");
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Assert.assertFalse(handler.managementOperation(token, request, response));
    // the token's own user name is used whether or not a renewer was supplied
    Mockito.verify(token).getUserName();
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
Mockito.verify(response).setContentType(MediaType.APPLICATION_JSON);
pwriter.close();
String responseOutput = writer.toString();
String tokenLabel = DelegationTokenAuthenticator.
DELEGATION_TOKEN_JSON;
Assert.assertTrue(responseOutput.contains(tokenLabel));
Assert.assertTrue(responseOutput.contains(
DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON));
ObjectMapper jsonMapper = new ObjectMapper();
Map json = jsonMapper.readValue(responseOutput, Map.class);
json = (Map) json.get(tokenLabel);
String tokenStr;
tokenStr = (String) json.get(DelegationTokenAuthenticator.
DELEGATION_TOKEN_URL_STRING_JSON);
Token<DelegationTokenIdentifier> dt = new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(tokenStr);
handler.getTokenManager().verifyToken(dt);
Assert.assertEquals(expectedTokenKind, dt.getKind());
}
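  // Cancels a freshly created token via CANCELDELEGATIONTOKEN and checks
  // both the missing-parameter error and that the token no longer verifies.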
@SuppressWarnings("unchecked")
private void testCancelToken() throws Exception {
DelegationTokenAuthenticator.DelegationTokenOperation op =
DelegationTokenAuthenticator.DelegationTokenOperation.
CANCELDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
Token<DelegationTokenIdentifier> token =
(Token<DelegationTokenIdentifier>) handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "foo");
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() + "&" +
DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
token.encodeToUrlString()
);
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
try {
handler.getTokenManager().verifyToken(token);
Assert.fail();
} catch (SecretManager.InvalidToken ex) {
//NOP
} catch (Throwable ex) {
Assert.fail();
}
}
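  // Renews a token via RENEWDELEGATIONTOKEN, covering the unauthenticated
  // challenge, the missing-parameter error and the successful renewal path.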
@SuppressWarnings("unchecked")
private void testRenewToken() throws Exception {
DelegationTokenAuthenticator.DelegationTokenOperation op =
DelegationTokenAuthenticator.DelegationTokenOperation.
RENEWDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED));
Mockito.verify(response).setHeader(Mockito.eq(
KerberosAuthenticator.WWW_AUTHENTICATE),
Mockito.eq("mock")
);
Mockito.reset(response);
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Token<DelegationTokenIdentifier> dToken =
(Token<DelegationTokenIdentifier>) handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() +
"&" + DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
dToken.encodeToUrlString());
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
pwriter.close();
Assert.assertTrue(writer.toString().contains("long"));
handler.getTokenManager().verifyToken(dToken);
}
@Test
public void testAuthenticate() throws Exception {
testValidDelegationTokenQueryString();
testValidDelegationTokenHeader();
testInvalidDelegationTokenQueryString();
testInvalidDelegationTokenHeader();
}
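  // A valid token supplied in the query string authenticates the request
  // directly; the returned AuthenticationToken carries an expiry of 0 and
  // reports itself expired, apparently so it is never persisted as a cookie.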
@SuppressWarnings("unchecked")
private void testValidDelegationTokenQueryString() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Token<DelegationTokenIdentifier> dToken =
(Token<DelegationTokenIdentifier>) handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.DELEGATION_PARAM + "=" +
dToken.encodeToUrlString());
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertEquals(UserGroupInformation.getCurrentUser().
getShortUserName(), token.getUserName());
Assert.assertEquals(0, token.getExpires());
Assert.assertEquals(handler.getType(),
token.getType());
Assert.assertTrue(token.isExpired());
}
@SuppressWarnings("unchecked")
private void testValidDelegationTokenHeader() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Token<DelegationTokenIdentifier> dToken =
(Token<DelegationTokenIdentifier>) handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getHeader(Mockito.eq(
DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER))).thenReturn(
dToken.encodeToUrlString());
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertEquals(UserGroupInformation.getCurrentUser().
getShortUserName(), token.getUserName());
Assert.assertEquals(0, token.getExpires());
Assert.assertEquals(handler.getType(),
token.getType());
Assert.assertTrue(token.isExpired());
}
private void testInvalidDelegationTokenQueryString() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.DELEGATION_PARAM + "=invalid");
StringWriter writer = new StringWriter();
Mockito.when(response.getWriter()).thenReturn(new PrintWriter(writer));
Assert.assertNull(handler.authenticate(request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_FORBIDDEN);
Assert.assertTrue(writer.toString().contains("AuthenticationException"));
}
private void testInvalidDelegationTokenHeader() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getHeader(Mockito.eq(
DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER))).thenReturn(
"invalid");
StringWriter writer = new StringWriter();
Mockito.when(response.getWriter()).thenReturn(new PrintWriter(writer));
Assert.assertNull(handler.authenticate(request, response));
Assert.assertTrue(writer.toString().contains("AuthenticationException"));
}
}
| 15,339 | 41.375691 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@RunWith(Parameterized.class)
public class TestDelegationTokenManager {
private static final long DAY_IN_SECS = 86400;
@Parameterized.Parameters
  public static Collection<Object[]> testParameters() {
return Arrays.asList(new Object[][] { { false }, { true } });
}
private boolean enableZKKey;
public TestDelegationTokenManager(boolean enableZKKey) {
this.enableZKKey = enableZKKey;
}
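  // Exercises the full token lifecycle: create, verify, renew, cancel, and
  // finally confirm that verification of the cancelled token fails.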
@SuppressWarnings("unchecked")
@Test
public void testDTManager() throws Exception {
Configuration conf = new Configuration(false);
conf.setLong(DelegationTokenManager.UPDATE_INTERVAL, DAY_IN_SECS);
conf.setLong(DelegationTokenManager.MAX_LIFETIME, DAY_IN_SECS);
conf.setLong(DelegationTokenManager.RENEW_INTERVAL, DAY_IN_SECS);
conf.setLong(DelegationTokenManager.REMOVAL_SCAN_INTERVAL, DAY_IN_SECS);
    conf.setBoolean(DelegationTokenManager.ENABLE_ZK_KEY, enableZKKey);
DelegationTokenManager tm =
new DelegationTokenManager(conf, new Text("foo"));
tm.init();
Token<DelegationTokenIdentifier> token =
(Token<DelegationTokenIdentifier>) tm.createToken(
UserGroupInformation.getCurrentUser(), "foo");
Assert.assertNotNull(token);
tm.verifyToken(token);
Assert.assertTrue(tm.renewToken(token, "foo") > System.currentTimeMillis());
tm.cancelToken(token, "foo");
try {
tm.verifyToken(token);
Assert.fail();
} catch (IOException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
tm.destroy();
}
}
| 2,782 | 33.7875 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.ssl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.math.BigInteger;
import java.net.URL;
import java.security.GeneralSecurityException;
import java.security.Key;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.KeyStore;
import java.security.NoSuchAlgorithmException;
import java.security.PrivateKey;
import java.security.SecureRandom;
import java.security.cert.Certificate;
import java.security.cert.X509Certificate;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.security.InvalidKeyException;
import java.security.NoSuchProviderException;
import java.security.SignatureException;
import java.security.cert.CertificateEncodingException;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import javax.security.auth.x500.X500Principal;
import org.bouncycastle.x509.X509V1CertificateGenerator;
public class KeyStoreTestUtil {
public static String getClasspathDir(Class klass) throws Exception {
String file = klass.getName();
file = file.replace('.', '/') + ".class";
URL url = Thread.currentThread().getContextClassLoader().getResource(file);
String baseDir = url.toURI().getPath();
baseDir = baseDir.substring(0, baseDir.length() - file.length() - 1);
return baseDir;
}
@SuppressWarnings("deprecation")
/**
* Create a self-signed X.509 Certificate.
*
* @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB"
* @param pair the KeyPair
* @param days how many days from now the Certificate is valid for
* @param algorithm the signing algorithm, eg "SHA1withRSA"
* @return the self-signed certificate
*/
public static X509Certificate generateCertificate(String dn, KeyPair pair, int days, String algorithm)
throws CertificateEncodingException,
InvalidKeyException,
IllegalStateException,
NoSuchProviderException, NoSuchAlgorithmException, SignatureException{
    Date from = new Date();
    Date to = new Date(from.getTime() + days * 86400000L);
    BigInteger sn = new BigInteger(64, new SecureRandom());
    X509V1CertificateGenerator certGen = new X509V1CertificateGenerator();
    X500Principal dnName = new X500Principal(dn);
    certGen.setSerialNumber(sn);
    certGen.setIssuerDN(dnName);
    certGen.setNotBefore(from);
    certGen.setNotAfter(to);
    certGen.setSubjectDN(dnName);
    certGen.setPublicKey(pair.getPublic());
    certGen.setSignatureAlgorithm(algorithm);
    return certGen.generate(pair.getPrivate());
}
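  // Illustrative usage (names are arbitrary), combining the two helpers
  // above to produce a throwaway self-signed certificate for tests:
  //   KeyPair pair = KeyStoreTestUtil.generateKeyPair("RSA");
  //   X509Certificate cert = KeyStoreTestUtil.generateCertificate(
  //       "CN=Test, L=London, C=GB", pair, 30, "SHA1withRSA");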
public static KeyPair generateKeyPair(String algorithm)
throws NoSuchAlgorithmException {
KeyPairGenerator keyGen = KeyPairGenerator.getInstance(algorithm);
keyGen.initialize(1024);
return keyGen.genKeyPair();
}
private static KeyStore createEmptyKeyStore()
throws GeneralSecurityException, IOException {
KeyStore ks = KeyStore.getInstance("JKS");
ks.load(null, null); // initialize
return ks;
}
private static void saveKeyStore(KeyStore ks, String filename,
String password)
throws GeneralSecurityException, IOException {
FileOutputStream out = new FileOutputStream(filename);
try {
ks.store(out, password.toCharArray());
} finally {
out.close();
}
}
public static void createKeyStore(String filename,
String password, String alias,
Key privateKey, Certificate cert)
throws GeneralSecurityException, IOException {
KeyStore ks = createEmptyKeyStore();
ks.setKeyEntry(alias, privateKey, password.toCharArray(),
new Certificate[]{cert});
saveKeyStore(ks, filename, password);
}
/**
* Creates a keystore with a single key and saves it to a file.
*
* @param filename String file to save
* @param password String store password to set on keystore
* @param keyPassword String key password to set on key
* @param alias String alias to use for the key
* @param privateKey Key to save in keystore
* @param cert Certificate to use as certificate chain associated to key
* @throws GeneralSecurityException for any error with the security APIs
* @throws IOException if there is an I/O error saving the file
*/
public static void createKeyStore(String filename,
String password, String keyPassword, String alias,
Key privateKey, Certificate cert)
throws GeneralSecurityException, IOException {
KeyStore ks = createEmptyKeyStore();
ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(),
new Certificate[]{cert});
saveKeyStore(ks, filename, password);
}
public static void createTrustStore(String filename,
String password, String alias,
Certificate cert)
throws GeneralSecurityException, IOException {
KeyStore ks = createEmptyKeyStore();
ks.setCertificateEntry(alias, cert);
saveKeyStore(ks, filename, password);
}
public static <T extends Certificate> void createTrustStore(
String filename, String password, Map<String, T> certs)
throws GeneralSecurityException, IOException {
KeyStore ks = createEmptyKeyStore();
for (Map.Entry<String, T> cert : certs.entrySet()) {
ks.setCertificateEntry(cert.getKey(), cert.getValue());
}
saveKeyStore(ks, filename, password);
}
public static void cleanupSSLConfig(String keystoresDir, String sslConfDir)
throws Exception {
File f = new File(keystoresDir + "/clientKS.jks");
f.delete();
f = new File(keystoresDir + "/serverKS.jks");
f.delete();
f = new File(keystoresDir + "/trustKS.jks");
f.delete();
f = new File(sslConfDir + "/ssl-client.xml");
f.delete();
f = new File(sslConfDir + "/ssl-server.xml");
f.delete();
}
/**
* Performs complete setup of SSL configuration in preparation for testing an
* SSLFactory. This includes keys, certs, keystores, truststores, the server
* SSL configuration file, the client SSL configuration file, and the master
* configuration file read by the SSLFactory.
*
* @param keystoresDir String directory to save keystores
* @param sslConfDir String directory to save SSL configuration files
* @param conf Configuration master configuration to be used by an SSLFactory,
* which will be mutated by this method
* @param useClientCert boolean true to make the client present a cert in the
* SSL handshake
*/
public static void setupSSLConfig(String keystoresDir, String sslConfDir,
Configuration conf, boolean useClientCert) throws Exception {
setupSSLConfig(keystoresDir, sslConfDir, conf, useClientCert, true);
}
/**
* Performs complete setup of SSL configuration in preparation for testing an
* SSLFactory. This includes keys, certs, keystores, truststores, the server
* SSL configuration file, the client SSL configuration file, and the master
* configuration file read by the SSLFactory.
*
* @param keystoresDir String directory to save keystores
* @param sslConfDir String directory to save SSL configuration files
* @param conf Configuration master configuration to be used by an SSLFactory,
* which will be mutated by this method
* @param useClientCert boolean true to make the client present a cert in the
* SSL handshake
* @param trustStore boolean true to create truststore, false not to create it
*/
public static void setupSSLConfig(String keystoresDir, String sslConfDir,
Configuration conf, boolean useClientCert,
boolean trustStore)
throws Exception {
String clientKS = keystoresDir + "/clientKS.jks";
String clientPassword = "clientP";
String serverKS = keystoresDir + "/serverKS.jks";
String serverPassword = "serverP";
String trustKS = null;
String trustPassword = "trustP";
File sslClientConfFile = new File(sslConfDir + "/ssl-client.xml");
File sslServerConfFile = new File(sslConfDir + "/ssl-server.xml");
Map<String, X509Certificate> certs = new HashMap<String, X509Certificate>();
if (useClientCert) {
KeyPair cKP = KeyStoreTestUtil.generateKeyPair("RSA");
X509Certificate cCert =
KeyStoreTestUtil.generateCertificate("CN=localhost, O=client", cKP, 30,
"SHA1withRSA");
KeyStoreTestUtil.createKeyStore(clientKS, clientPassword, "client",
cKP.getPrivate(), cCert);
certs.put("client", cCert);
}
KeyPair sKP = KeyStoreTestUtil.generateKeyPair("RSA");
X509Certificate sCert =
KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", sKP, 30,
"SHA1withRSA");
KeyStoreTestUtil.createKeyStore(serverKS, serverPassword, "server",
sKP.getPrivate(), sCert);
certs.put("server", sCert);
if (trustStore) {
trustKS = keystoresDir + "/trustKS.jks";
KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs);
}
Configuration clientSSLConf = createClientSSLConfig(clientKS, clientPassword,
clientPassword, trustKS);
Configuration serverSSLConf = createServerSSLConfig(serverKS, serverPassword,
serverPassword, trustKS);
saveConfig(sslClientConfFile, clientSSLConf);
saveConfig(sslServerConfFile, serverSSLConf);
conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "ALLOW_ALL");
conf.set(SSLFactory.SSL_CLIENT_CONF_KEY, sslClientConfFile.getName());
conf.set(SSLFactory.SSL_SERVER_CONF_KEY, sslServerConfFile.getName());
conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, useClientCert);
}
/**
* Creates SSL configuration for a client.
*
* @param clientKS String client keystore file
* @param password String store password, or null to avoid setting store
* password
* @param keyPassword String key password, or null to avoid setting key
* password
* @param trustKS String truststore file
* @return Configuration for client SSL
*/
public static Configuration createClientSSLConfig(String clientKS,
String password, String keyPassword, String trustKS) {
Configuration clientSSLConf = createSSLConfig(SSLFactory.Mode.CLIENT,
clientKS, password, keyPassword, trustKS);
return clientSSLConf;
}
/**
* Creates SSL configuration for a server.
*
* @param serverKS String server keystore file
* @param password String store password, or null to avoid setting store
* password
* @param keyPassword String key password, or null to avoid setting key
* password
* @param trustKS String truststore file
* @return Configuration for server SSL
*/
public static Configuration createServerSSLConfig(String serverKS,
String password, String keyPassword, String trustKS) throws IOException {
Configuration serverSSLConf = createSSLConfig(SSLFactory.Mode.SERVER,
serverKS, password, keyPassword, trustKS);
return serverSSLConf;
}
/**
* Creates SSL configuration.
*
* @param mode SSLFactory.Mode mode to configure
* @param keystore String keystore file
* @param password String store password, or null to avoid setting store
* password
* @param keyPassword String key password, or null to avoid setting key
* password
* @param trustKS String truststore file
* @return Configuration for SSL
*/
private static Configuration createSSLConfig(SSLFactory.Mode mode,
String keystore, String password, String keyPassword, String trustKS) {
String trustPassword = "trustP";
Configuration sslConf = new Configuration(false);
if (keystore != null) {
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY), keystore);
}
if (password != null) {
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), password);
}
if (keyPassword != null) {
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY),
keyPassword);
}
if (trustKS != null) {
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS);
}
if (trustPassword != null) {
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY),
trustPassword);
}
sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000");
return sslConf;
}
/**
* Saves configuration to a file.
*
* @param file File to save
* @param conf Configuration contents to write to file
* @throws IOException if there is an I/O error saving the file
*/
public static void saveConfig(File file, Configuration conf)
throws IOException {
Writer writer = new FileWriter(file);
try {
conf.writeXml(writer);
} finally {
writer.close();
}
}
public static void provisionPasswordsToCredentialProvider() throws Exception {
File testDir = new File(System.getProperty("test.build.data",
"target/test-dir"));
Configuration conf = new Configuration();
final Path jksPath = new Path(testDir.toString(), "test.jks");
final String ourUrl =
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
File file = new File(testDir, "test.jks");
file.delete();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
CredentialProvider provider =
CredentialProviderFactory.getProviders(conf).get(0);
char[] keypass = {'k', 'e', 'y', 'p', 'a', 's', 's'};
char[] storepass = {'s', 't', 'o', 'r', 'e', 'p', 'a', 's', 's'};
// create new aliases
try {
provider.createCredentialEntry(
FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY),
storepass);
provider.createCredentialEntry(
FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER,
FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY),
keypass);
// write out so that it can be found in checks
provider.flush();
} catch (Exception e) {
e.printStackTrace();
throw e;
}
}
}
| 15,849 | 37.285024 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.ssl;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import javax.net.ssl.HttpsURLConnection;
import java.io.File;
import java.net.URL;
import java.security.GeneralSecurityException;
import java.security.KeyPair;
import java.security.cert.X509Certificate;
import java.util.Collections;
import java.util.Map;
public class TestSSLFactory {
private static final String BASEDIR =
System.getProperty("test.build.dir", "target/test-dir") + "/" +
TestSSLFactory.class.getSimpleName();
private static final String KEYSTORES_DIR =
new File(BASEDIR).getAbsolutePath();
private String sslConfsDir;
@BeforeClass
public static void setUp() throws Exception {
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
}
private Configuration createConfiguration(boolean clientCert,
boolean trustStore)
throws Exception {
Configuration conf = new Configuration();
KeyStoreTestUtil.setupSSLConfig(KEYSTORES_DIR, sslConfsDir, conf,
clientCert, trustStore);
return conf;
}
@After
@Before
public void cleanUp() throws Exception {
sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
KeyStoreTestUtil.cleanupSSLConfig(KEYSTORES_DIR, sslConfsDir);
}
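  // In CLIENT mode the socket factory and hostname verifier are available,
  // but asking for a server socket factory must fail with the expected
  // IllegalStateException.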
@Test(expected = IllegalStateException.class)
public void clientMode() throws Exception {
Configuration conf = createConfiguration(false, true);
SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
try {
sslFactory.init();
Assert.assertNotNull(sslFactory.createSSLSocketFactory());
Assert.assertNotNull(sslFactory.getHostnameVerifier());
sslFactory.createSSLServerSocketFactory();
} finally {
sslFactory.destroy();
}
}
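  // Mirror image of clientMode(): in SERVER mode the server socket factory
  // works, while requesting client-side artifacts (a socket factory or a
  // hostname verifier) must fail with IllegalStateException.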
private void serverMode(boolean clientCert, boolean socket) throws Exception {
Configuration conf = createConfiguration(clientCert, true);
SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
try {
sslFactory.init();
Assert.assertNotNull(sslFactory.createSSLServerSocketFactory());
Assert.assertEquals(clientCert, sslFactory.isClientCertRequired());
if (socket) {
sslFactory.createSSLSocketFactory();
} else {
sslFactory.getHostnameVerifier();
}
} finally {
sslFactory.destroy();
}
}
@Test(expected = IllegalStateException.class)
public void serverModeWithoutClientCertsSocket() throws Exception {
serverMode(false, true);
}
@Test(expected = IllegalStateException.class)
public void serverModeWithClientCertsSocket() throws Exception {
serverMode(true, true);
}
@Test(expected = IllegalStateException.class)
public void serverModeWithoutClientCertsVerifier() throws Exception {
serverMode(false, false);
}
@Test(expected = IllegalStateException.class)
public void serverModeWithClientCertsVerifier() throws Exception {
serverMode(true, false);
}
@Test
public void validHostnameVerifier() throws Exception {
Configuration conf = createConfiguration(false, true);
conf.unset(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY);
SSLFactory sslFactory = new
SSLFactory(SSLFactory.Mode.CLIENT, conf);
sslFactory.init();
Assert.assertEquals("DEFAULT", sslFactory.getHostnameVerifier().toString());
sslFactory.destroy();
conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "ALLOW_ALL");
sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
sslFactory.init();
Assert.assertEquals("ALLOW_ALL",
sslFactory.getHostnameVerifier().toString());
sslFactory.destroy();
conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "DEFAULT_AND_LOCALHOST");
sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
sslFactory.init();
Assert.assertEquals("DEFAULT_AND_LOCALHOST",
sslFactory.getHostnameVerifier().toString());
sslFactory.destroy();
conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "STRICT");
sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
sslFactory.init();
Assert.assertEquals("STRICT", sslFactory.getHostnameVerifier().toString());
sslFactory.destroy();
conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "STRICT_IE6");
sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
sslFactory.init();
Assert.assertEquals("STRICT_IE6",
sslFactory.getHostnameVerifier().toString());
sslFactory.destroy();
}
@Test(expected = GeneralSecurityException.class)
public void invalidHostnameVerifier() throws Exception {
Configuration conf = createConfiguration(false, true);
conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "foo");
SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
try {
sslFactory.init();
} finally {
sslFactory.destroy();
}
}
@Test
public void testConnectionConfigurator() throws Exception {
Configuration conf = createConfiguration(false, true);
conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "STRICT_IE6");
SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
try {
sslFactory.init();
HttpsURLConnection sslConn =
(HttpsURLConnection) new URL("https://foo").openConnection();
Assert.assertNotSame("STRICT_IE6",
sslConn.getHostnameVerifier().toString());
sslFactory.configure(sslConn);
Assert.assertEquals("STRICT_IE6",
sslConn.getHostnameVerifier().toString());
} finally {
sslFactory.destroy();
}
}
@Test
public void testServerDifferentPasswordAndKeyPassword() throws Exception {
checkSSLFactoryInitWithPasswords(SSLFactory.Mode.SERVER, "password",
"keyPassword", "password", "keyPassword");
}
@Test
public void testServerKeyPasswordDefaultsToPassword() throws Exception {
checkSSLFactoryInitWithPasswords(SSLFactory.Mode.SERVER, "password",
"password", "password", null);
}
@Test
public void testClientDifferentPasswordAndKeyPassword() throws Exception {
checkSSLFactoryInitWithPasswords(SSLFactory.Mode.CLIENT, "password",
"keyPassword", "password", "keyPassword");
}
@Test
public void testClientKeyPasswordDefaultsToPassword() throws Exception {
checkSSLFactoryInitWithPasswords(SSLFactory.Mode.CLIENT, "password",
"password", "password", null);
}
@Test
public void testServerCredProviderPasswords() throws Exception {
KeyStoreTestUtil.provisionPasswordsToCredentialProvider();
checkSSLFactoryInitWithPasswords(SSLFactory.Mode.SERVER,
"storepass", "keypass", null, null, true);
}
/**
* Checks that SSLFactory initialization is successful with the given
* arguments. This is a helper method for writing test cases that cover
* different combinations of settings for the store password and key password.
* It takes care of bootstrapping a keystore, a truststore, and SSL client or
* server configuration. Then, it initializes an SSLFactory. If no exception
* is thrown, then initialization was successful.
*
* @param mode SSLFactory.Mode mode to test
* @param password String store password to set on keystore
* @param keyPassword String key password to set on keystore
* @param confPassword String store password to set in SSL config file, or null
* to avoid setting in SSL config file
* @param confKeyPassword String key password to set in SSL config file, or
* null to avoid setting in SSL config file
* @throws Exception for any error
*/
private void checkSSLFactoryInitWithPasswords(SSLFactory.Mode mode,
String password, String keyPassword, String confPassword,
String confKeyPassword) throws Exception {
checkSSLFactoryInitWithPasswords(mode, password, keyPassword,
confPassword, confKeyPassword, false);
}
/**
* Checks that SSLFactory initialization is successful with the given
* arguments. This is a helper method for writing test cases that cover
* different combinations of settings for the store password and key password.
* It takes care of bootstrapping a keystore, a truststore, and SSL client or
* server configuration. Then, it initializes an SSLFactory. If no exception
* is thrown, then initialization was successful.
*
* @param mode SSLFactory.Mode mode to test
* @param password String store password to set on keystore
* @param keyPassword String key password to set on keystore
* @param confPassword String store password to set in SSL config file, or null
* to avoid setting in SSL config file
* @param confKeyPassword String key password to set in SSL config file, or
* null to avoid setting in SSL config file
* @param useCredProvider boolean to indicate whether passwords should be set
* into the config or not. When set to true nulls are set and aliases are
* expected to be resolved through credential provider API through the
* Configuration.getPassword method
* @throws Exception for any error
*/
private void checkSSLFactoryInitWithPasswords(SSLFactory.Mode mode,
String password, String keyPassword, String confPassword,
String confKeyPassword, boolean useCredProvider) throws Exception {
String keystore = new File(KEYSTORES_DIR, "keystore.jks").getAbsolutePath();
String truststore = new File(KEYSTORES_DIR, "truststore.jks")
.getAbsolutePath();
String trustPassword = "trustP";
// Create keys, certs, keystore, and truststore.
KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
X509Certificate cert = KeyStoreTestUtil.generateCertificate("CN=Test",
keyPair, 30, "SHA1withRSA");
KeyStoreTestUtil.createKeyStore(keystore, password, keyPassword, "Test",
keyPair.getPrivate(), cert);
Map<String, X509Certificate> certs = Collections.singletonMap("server",
cert);
KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs);
// Create SSL configuration file, for either server or client.
final String sslConfFileName;
final Configuration sslConf;
// if the passwords are provisioned in a cred provider then don't set them
// in the configuration properly - expect them to be resolved through the
// provider
if (useCredProvider) {
confPassword = null;
confKeyPassword = null;
}
if (mode == SSLFactory.Mode.SERVER) {
sslConfFileName = "ssl-server.xml";
sslConf = KeyStoreTestUtil.createServerSSLConfig(keystore, confPassword,
confKeyPassword, truststore);
if (useCredProvider) {
File testDir = new File(System.getProperty("test.build.data",
"target/test-dir"));
final Path jksPath = new Path(testDir.toString(), "test.jks");
final String ourUrl =
JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
sslConf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
}
} else {
sslConfFileName = "ssl-client.xml";
sslConf = KeyStoreTestUtil.createClientSSLConfig(keystore, confPassword,
confKeyPassword, truststore);
}
KeyStoreTestUtil.saveConfig(new File(sslConfsDir, sslConfFileName), sslConf);
// Create the master configuration for use by the SSLFactory, which by
// default refers to the ssl-server.xml or ssl-client.xml created above.
Configuration conf = new Configuration();
conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, true);
// Try initializing an SSLFactory.
SSLFactory sslFactory = new SSLFactory(mode, conf);
try {
sslFactory.init();
} finally {
sslFactory.destroy();
}
}
@Test
public void testNoClientCertsInitialization() throws Exception {
Configuration conf = createConfiguration(false, true);
conf.unset(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY);
SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
try {
sslFactory.init();
} finally {
sslFactory.destroy();
}
}
@Test
public void testNoTrustStore() throws Exception {
Configuration conf = createConfiguration(false, false);
conf.unset(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY);
SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
try {
sslFactory.init();
} finally {
sslFactory.destroy();
}
}
}
| 13,706 | 37.075 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509TrustManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.ssl;
import org.apache.hadoop.fs.FileUtil;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.security.KeyPair;
import java.security.cert.X509Certificate;
import java.util.HashMap;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.createTrustStore;
import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.generateCertificate;
import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.generateKeyPair;
public class TestReloadingX509TrustManager {
private static final String BASEDIR =
System.getProperty("test.build.data", "target/test-dir") + "/" +
TestReloadingX509TrustManager.class.getSimpleName();
private X509Certificate cert1;
private X509Certificate cert2;
@BeforeClass
public static void setUp() throws Exception {
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
base.mkdirs();
}
@Test(expected = IOException.class)
public void testLoadMissingTrustStore() throws Exception {
String truststoreLocation = BASEDIR + "/testmissing.jks";
ReloadingX509TrustManager tm =
new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10);
try {
tm.init();
} finally {
tm.destroy();
}
}
@Test(expected = IOException.class)
public void testLoadCorruptTrustStore() throws Exception {
String truststoreLocation = BASEDIR + "/testcorrupt.jks";
OutputStream os = new FileOutputStream(truststoreLocation);
os.write(1);
os.close();
ReloadingX509TrustManager tm =
new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10);
try {
tm.init();
} finally {
tm.destroy();
}
}
@Test
public void testReload() throws Exception {
KeyPair kp = generateKeyPair("RSA");
cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA");
cert2 = generateCertificate("CN=Cert2", kp, 30, "SHA1withRSA");
String truststoreLocation = BASEDIR + "/testreload.jks";
createTrustStore(truststoreLocation, "password", "cert1", cert1);
ReloadingX509TrustManager tm =
new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10);
try {
tm.init();
assertEquals(1, tm.getAcceptedIssuers().length);
// Wait so that the file modification time is different
Thread.sleep((tm.getReloadInterval() + 1000));
// Add another cert
Map<String, X509Certificate> certs = new HashMap<String, X509Certificate>();
certs.put("cert1", cert1);
certs.put("cert2", cert2);
createTrustStore(truststoreLocation, "password", certs);
      // sanity-check the configured reload interval before waiting
      assertEquals(10, tm.getReloadInterval());
// Wait so that the file modification time is different
Thread.sleep((tm.getReloadInterval() + 200));
assertEquals(2, tm.getAcceptedIssuers().length);
} finally {
tm.destroy();
}
}
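  // Illustrative sketch (added for clarity, not in the original test): poll
  // until the trust manager has picked up a change instead of sleeping for a
  // fixed period. The expected count and timeout are hypothetical values.
  private static void waitForIssuerCount(ReloadingX509TrustManager tm,
      int expected, long timeoutMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (tm.getAcceptedIssuers().length != expected
        && System.currentTimeMillis() < deadline) {
      Thread.sleep(tm.getReloadInterval());
    }
  }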
@Test
public void testReloadMissingTrustStore() throws Exception {
KeyPair kp = generateKeyPair("RSA");
cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA");
cert2 = generateCertificate("CN=Cert2", kp, 30, "SHA1withRSA");
String truststoreLocation = BASEDIR + "/testmissing.jks";
createTrustStore(truststoreLocation, "password", "cert1", cert1);
ReloadingX509TrustManager tm =
new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10);
try {
tm.init();
assertEquals(1, tm.getAcceptedIssuers().length);
X509Certificate cert = tm.getAcceptedIssuers()[0];
new File(truststoreLocation).delete();
// Wait so that the file modification time is different
Thread.sleep((tm.getReloadInterval() + 200));
assertEquals(1, tm.getAcceptedIssuers().length);
assertEquals(cert, tm.getAcceptedIssuers()[0]);
} finally {
tm.destroy();
}
}
@Test
public void testReloadCorruptTrustStore() throws Exception {
KeyPair kp = generateKeyPair("RSA");
cert1 = generateCertificate("CN=Cert1", kp, 30, "SHA1withRSA");
cert2 = generateCertificate("CN=Cert2", kp, 30, "SHA1withRSA");
String truststoreLocation = BASEDIR + "/testcorrupt.jks";
createTrustStore(truststoreLocation, "password", "cert1", cert1);
ReloadingX509TrustManager tm =
new ReloadingX509TrustManager("jks", truststoreLocation, "password", 10);
try {
tm.init();
assertEquals(1, tm.getAcceptedIssuers().length);
X509Certificate cert = tm.getAcceptedIssuers()[0];
OutputStream os = new FileOutputStream(truststoreLocation);
os.write(1);
os.close();
new File(truststoreLocation).setLastModified(System.currentTimeMillis() -
1000);
// Wait so that the file modification time is different
Thread.sleep((tm.getReloadInterval() + 200));
assertEquals(1, tm.getAcceptedIssuers().length);
assertEquals(cert, tm.getAcceptedIssuers()[0]);
} finally {
tm.destroy();
}
}
}
| 6,031 | 33.272727 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayPrimitiveWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import java.util.Arrays;
import org.apache.hadoop.util.StringUtils;
import org.junit.*;
import junit.framework.TestCase;
/** Unit tests for {@link ArrayPrimitiveWritable} */
public class TestArrayPrimitiveWritable extends TestCase {
static final boolean[] b = {true, true, false};
static final char[] c = {'a', 'b', 'c'};
static final byte[] by = {1, 2, 3};
static final short[] sh = {1, 2, 3};
static final int[] i = {1, 2, 3};
static final long[] lo = {1L, 2L, 3L};
static final float[] f = {(float) 1.0, (float) 2.5, (float) 3.3};
static final double[] d = {1.0, 2.5, 3.3};
static final Object[] bigSet = {b, c, by, sh, i, lo, f, d};
static final Object[] expectedResultSet = {b, b, c, c, by, by, sh, sh,
i, i, lo, lo, f, f, d, d};
final Object[] resultSet = new Object[bigSet.length * 2];
final DataOutputBuffer out = new DataOutputBuffer();
final DataInputBuffer in = new DataInputBuffer();
@Before
public void resetBuffers() throws IOException {
out.reset();
in.reset();
}
@Test
public void testMany() throws IOException {
//Write a big set of data, one of each primitive type array
for (Object x : bigSet) {
//write each test object two ways
//First, transparently via ObjectWritable
ObjectWritable.writeObject(out, x, x.getClass(), null, true);
//Second, explicitly via ArrayPrimitiveWritable
(new ArrayPrimitiveWritable(x)).write(out);
}
//Now read the data back in
in.reset(out.getData(), out.getLength());
for (int x = 0; x < resultSet.length; ) {
//First, transparently
resultSet[x++] = ObjectWritable.readObject(in, null);
//Second, explicitly
ArrayPrimitiveWritable apw = new ArrayPrimitiveWritable();
apw.readFields(in);
resultSet[x++] = apw.get();
}
//validate data structures and values
assertEquals(expectedResultSet.length, resultSet.length);
for (int x = 0; x < resultSet.length; x++) {
assertEquals("ComponentType of array " + x,
expectedResultSet[x].getClass().getComponentType(),
resultSet[x].getClass().getComponentType());
}
assertTrue("In and Out arrays didn't match values",
Arrays.deepEquals(expectedResultSet, resultSet));
}
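  // Minimal round-trip sketch (illustrative only): wrap a primitive array in
  // ArrayPrimitiveWritable, serialize it, and read it back through the same
  // buffers the tests above use.
  private Object roundTrip(Object primitiveArray) throws IOException {
    out.reset();
    new ArrayPrimitiveWritable(primitiveArray).write(out);
    in.reset(out.getData(), out.getLength());
    ArrayPrimitiveWritable read = new ArrayPrimitiveWritable();
    read.readFields(in);
    return read.get();
  }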
@Test
@SuppressWarnings("deprecation")
public void testObjectLabeling() throws IOException {
//Do a few tricky experiments to make sure things are being written
//the way we expect
//Write the data array with ObjectWritable
//which will indirectly write it using APW.Internal
ObjectWritable.writeObject(out, i, i.getClass(), null, true);
//Write the corresponding APW directly with ObjectWritable
ArrayPrimitiveWritable apw = new ArrayPrimitiveWritable(i);
ObjectWritable.writeObject(out, apw, apw.getClass(), null, true);
//Get ready to read it back
in.reset(out.getData(), out.getLength());
//Read the int[] object as written by ObjectWritable, but
//"going around" ObjectWritable
String className = UTF8.readString(in);
assertEquals("The int[] written by ObjectWritable was not labelled as "
+ "an ArrayPrimitiveWritable.Internal",
ArrayPrimitiveWritable.Internal.class.getName(), className);
ArrayPrimitiveWritable.Internal apwi =
new ArrayPrimitiveWritable.Internal();
apwi.readFields(in);
assertEquals("The ArrayPrimitiveWritable.Internal component type was corrupted",
int.class, apw.getComponentType());
assertTrue("The int[] written by ObjectWritable as "
+ "ArrayPrimitiveWritable.Internal was corrupted",
Arrays.equals(i, (int[])(apwi.get())));
//Read the APW object as written by ObjectWritable, but
//"going around" ObjectWritable
String declaredClassName = UTF8.readString(in);
assertEquals("The APW written by ObjectWritable was not labelled as "
+ "declaredClass ArrayPrimitiveWritable",
ArrayPrimitiveWritable.class.getName(), declaredClassName);
className = UTF8.readString(in);
assertEquals("The APW written by ObjectWritable was not labelled as "
+ "class ArrayPrimitiveWritable",
ArrayPrimitiveWritable.class.getName(), className);
ArrayPrimitiveWritable apw2 =
new ArrayPrimitiveWritable();
apw2.readFields(in);
assertEquals("The ArrayPrimitiveWritable component type was corrupted",
int.class, apw2.getComponentType());
assertTrue("The int[] written by ObjectWritable as "
+ "ArrayPrimitiveWritable was corrupted",
Arrays.equals(i, (int[])(apw2.get())));
}
@Test
public void testOldFormat() throws IOException {
//Make sure we still correctly write the old format if desired.
//Write the data array with old ObjectWritable API
//which will set allowCompactArrays false.
ObjectWritable.writeObject(out, i, i.getClass(), null);
//Get ready to read it back
in.reset(out.getData(), out.getLength());
//Read the int[] object as written by ObjectWritable, but
//"going around" ObjectWritable
@SuppressWarnings("deprecation")
String className = UTF8.readString(in);
assertEquals("The int[] written by ObjectWritable as a non-compact array "
+ "was not labelled as an array of int",
i.getClass().getName(), className);
int length = in.readInt();
assertEquals("The int[] written by ObjectWritable as a non-compact array "
+ "was not expected length", i.length, length);
int[] readValue = new int[length];
try {
for (int i = 0; i < length; i++) {
readValue[i] = (int)((Integer)ObjectWritable.readObject(in, null));
}
} catch (Exception e) {
fail("The int[] written by ObjectWritable as a non-compact array "
+ "was corrupted. Failed to correctly read int[] of length "
+ length + ". Got exception:\n"
+ StringUtils.stringifyException(e));
}
assertTrue("The int[] written by ObjectWritable as a non-compact array "
+ "was corrupted.", Arrays.equals(i, readValue));
}
}
| 6,974 | 37.535912 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSortedMapWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.util.Map;
import org.junit.Test;
/**
* Tests SortedMapWritable
*/
public class TestSortedMapWritable {
/** the test */
@Test
@SuppressWarnings("unchecked")
public void testSortedMapWritable() {
Text[] keys = {
new Text("key1"),
new Text("key2"),
new Text("key3"),
};
BytesWritable[] values = {
new BytesWritable("value1".getBytes()),
new BytesWritable("value2".getBytes()),
new BytesWritable("value3".getBytes())
};
SortedMapWritable inMap = new SortedMapWritable();
for (int i = 0; i < keys.length; i++) {
inMap.put(keys[i], values[i]);
}
assertEquals(0, inMap.firstKey().compareTo(keys[0]));
assertEquals(0, inMap.lastKey().compareTo(keys[2]));
SortedMapWritable outMap = new SortedMapWritable(inMap);
assertEquals(inMap.size(), outMap.size());
for (Map.Entry<WritableComparable, Writable> e: inMap.entrySet()) {
assertTrue(outMap.containsKey(e.getKey()));
assertEquals(0, ((WritableComparable) outMap.get(e.getKey())).compareTo(
e.getValue()));
}
// Now for something a little harder...
Text[] maps = {
new Text("map1"),
new Text("map2")
};
SortedMapWritable mapOfMaps = new SortedMapWritable();
mapOfMaps.put(maps[0], inMap);
mapOfMaps.put(maps[1], outMap);
SortedMapWritable copyOfMapOfMaps = new SortedMapWritable(mapOfMaps);
for (int i = 0; i < maps.length; i++) {
assertTrue(copyOfMapOfMaps.containsKey(maps[i]));
SortedMapWritable a = (SortedMapWritable) mapOfMaps.get(maps[i]);
SortedMapWritable b = (SortedMapWritable) copyOfMapOfMaps.get(maps[i]);
assertEquals(a.size(), b.size());
for (Writable key: a.keySet()) {
assertTrue(b.containsKey(key));
// This will work because we know what we put into each set
WritableComparable aValue = (WritableComparable) a.get(key);
WritableComparable bValue = (WritableComparable) b.get(key);
assertEquals(0, aValue.compareTo(bValue));
}
}
}
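  // Illustrative sketch, not in the original test: SortedMapWritable is
  // itself a Writable, so instances can be serialized and restored with the
  // standard write/readFields pair (the buffer classes are in this package).
  private SortedMapWritable writeAndRead(SortedMapWritable map)
      throws java.io.IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    map.write(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    SortedMapWritable result = new SortedMapWritable();
    result.readFields(in);
    return result;
  }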
/**
* Test that number of "unknown" classes is propagated across multiple copies.
*/
@Test
@SuppressWarnings("deprecation")
public void testForeignClass() {
SortedMapWritable inMap = new SortedMapWritable();
inMap.put(new Text("key"), new UTF8("value"));
inMap.put(new Text("key2"), new UTF8("value2"));
SortedMapWritable outMap = new SortedMapWritable(inMap);
SortedMapWritable copyOfCopy = new SortedMapWritable(outMap);
assertEquals(1, copyOfCopy.getNewClasses());
}
/**
* Tests if equal and hashCode method still hold the contract.
*/
@Test
public void testEqualsAndHashCode() {
String failureReason;
SortedMapWritable mapA = new SortedMapWritable();
SortedMapWritable mapB = new SortedMapWritable();
// Sanity checks
failureReason = "SortedMapWritable couldn't be initialized. Got null reference";
assertNotNull(failureReason, mapA);
assertNotNull(failureReason, mapB);
// Basic null check
assertFalse("equals method returns true when passed null", mapA.equals(null));
// When entry set is empty, they should be equal
assertTrue("Two empty SortedMapWritables are no longer equal", mapA.equals(mapB));
// Setup
Text[] keys = {
new Text("key1"),
new Text("key2")
};
BytesWritable[] values = {
new BytesWritable("value1".getBytes()),
new BytesWritable("value2".getBytes())
};
mapA.put(keys[0], values[0]);
mapB.put(keys[1], values[1]);
// entrySets are different
failureReason = "Two SortedMapWritables with different data are now equal";
assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
assertTrue(failureReason, !mapA.equals(mapB));
assertTrue(failureReason, !mapB.equals(mapA));
mapA.put(keys[1], values[1]);
mapB.put(keys[0], values[0]);
// entrySets are now same
failureReason = "Two SortedMapWritables with same entry sets formed in different order are now different";
assertEquals(failureReason, mapA.hashCode(), mapB.hashCode());
assertTrue(failureReason, mapA.equals(mapB));
assertTrue(failureReason, mapB.equals(mapA));
// Let's check if entry sets of same keys but different values
mapA.put(keys[0], values[1]);
mapA.put(keys[1], values[0]);
failureReason = "Two SortedMapWritables with different content are now equal";
assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
assertTrue(failureReason, !mapA.equals(mapB));
assertTrue(failureReason, !mapB.equals(mapA));
}
@Test(timeout = 1000)
public void testPutAll() {
SortedMapWritable map1 = new SortedMapWritable();
SortedMapWritable map2 = new SortedMapWritable();
map1.put(new Text("key"), new Text("value"));
map2.putAll(map1);
assertEquals("map1 entries don't match map2 entries", map1, map2);
assertTrue(
"map2 doesn't have class information from map1",
map2.classToIdMap.containsKey(Text.class)
&& map2.idToClassMap.containsValue(Text.class));
}
}
| 6,243 | 33.307692 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/AvroTestUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.ByteArrayOutputStream;
import java.lang.reflect.Type;
import org.apache.avro.Schema;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.reflect.ReflectDatumWriter;
import org.apache.avro.reflect.ReflectDatumReader;
import org.apache.avro.io.DecoderFactory;
import static junit.framework.TestCase.assertEquals;
public class AvroTestUtil {
public static void testReflect(Object value, String schema) throws Exception {
testReflect(value, value.getClass(), schema);
}
public static void testReflect(Object value, Type type, String schema)
throws Exception {
// check that schema matches expected
Schema s = ReflectData.get().getSchema(type);
assertEquals(Schema.parse(schema), s);
// check that value is serialized correctly
ReflectDatumWriter<Object> writer = new ReflectDatumWriter<Object>(s);
ByteArrayOutputStream out = new ByteArrayOutputStream();
writer.write(value, EncoderFactory.get().directBinaryEncoder(out, null));
ReflectDatumReader<Object> reader = new ReflectDatumReader<Object>(s);
Object after =
reader.read(null,
DecoderFactory.get().binaryDecoder(out.toByteArray(), null));
assertEquals(value, after);
}
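  // Example of how callers use this helper, as in TestText.testAvroReflect:
  //   AvroTestUtil.testReflect(new Text("foo"),
  //       "{\"type\":\"string\",\"java-class\":\"org.apache.hadoop.io.Text\"}");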
}
| 2,122 | 35.603448 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import junit.framework.TestCase;
public class TestWritableUtils extends TestCase {
private static final Log LOG = LogFactory.getLog(TestWritableUtils.class);
public static void testValue(int val, int vintlen) throws IOException {
DataOutputBuffer buf = new DataOutputBuffer();
DataInputBuffer inbuf = new DataInputBuffer();
WritableUtils.writeVInt(buf, val);
if (LOG.isDebugEnabled()) {
LOG.debug("Value = " + val);
BytesWritable printer = new BytesWritable();
printer.set(buf.getData(), 0, buf.getLength());
LOG.debug("Buffer = " + printer);
}
inbuf.reset(buf.getData(), 0, buf.getLength());
assertEquals(val, WritableUtils.readVInt(inbuf));
assertEquals(vintlen, buf.getLength());
assertEquals(vintlen, WritableUtils.getVIntSize(val));
assertEquals(vintlen, WritableUtils.decodeVIntSize(buf.getData()[0]));
}
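  // For reference (derived from the assertions exercised below): values in
  // [-112, 127] fit in a single vint byte; larger magnitudes use a length
  // byte followed by the value bytes, e.g. 256 encodes in three bytes.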
public static void testReadInRange(long val, int lower,
int upper, boolean expectSuccess) throws IOException {
DataOutputBuffer buf = new DataOutputBuffer();
DataInputBuffer inbuf = new DataInputBuffer();
WritableUtils.writeVLong(buf, val);
try {
inbuf.reset(buf.getData(), 0, buf.getLength());
long val2 = WritableUtils.readVIntInRange(inbuf, lower, upper);
if (!expectSuccess) {
fail("expected readVIntInRange to throw an exception");
}
assertEquals(val, val2);
} catch(IOException e) {
if (expectSuccess) {
LOG.error("unexpected exception:", e);
fail("readVIntInRange threw an unexpected exception");
}
}
}
public static void testVInt() throws Exception {
testValue(12, 1);
testValue(127, 1);
testValue(-112, 1);
testValue(-113, 2);
testValue(-128, 2);
testValue(128, 2);
testValue(-129, 2);
testValue(255, 2);
testValue(-256, 2);
testValue(256, 3);
testValue(-257, 3);
testValue(65535, 3);
testValue(-65536, 3);
testValue(65536, 4);
testValue(-65537, 4);
testReadInRange(123, 122, 123, true);
testReadInRange(123, 0, 100, false);
testReadInRange(0, 0, 100, true);
testReadInRange(-1, 0, 100, false);
testReadInRange(1099511627776L, 0, Integer.MAX_VALUE, false);
}
}
| 3,195 | 34.120879 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import junit.framework.TestCase;
import java.io.IOException;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.Random;
import com.google.common.base.Charsets;
import com.google.common.primitives.Bytes;
/** Unit tests for {@link Text}. */
public class TestText extends TestCase {
private static final int NUM_ITERATIONS = 100;
public TestText(String name) { super(name); }
private static final Random RANDOM = new Random(1);
private static final int RAND_LEN = -1;
// generate a valid java String
private static String getTestString(int len) throws Exception {
StringBuilder buffer = new StringBuilder();
int length = (len==RAND_LEN) ? RANDOM.nextInt(1000) : len;
while (buffer.length()<length) {
int codePoint = RANDOM.nextInt(Character.MAX_CODE_POINT);
      char[] tmpStr = new char[2];
if (Character.isDefined(codePoint)) {
        // skip surrogate code points, which would be unpaired here
if (codePoint < Character.MIN_SUPPLEMENTARY_CODE_POINT &&
!Character.isHighSurrogate((char)codePoint) &&
!Character.isLowSurrogate((char)codePoint)) {
Character.toChars(codePoint, tmpStr, 0);
buffer.append(tmpStr);
}
}
}
return buffer.toString();
}
public static String getTestString() throws Exception {
return getTestString(RAND_LEN);
}
public static String getLongString() throws Exception {
String str = getTestString();
int length = Short.MAX_VALUE+str.length();
StringBuilder buffer = new StringBuilder();
while(buffer.length()<length)
buffer.append(str);
return buffer.toString();
}
public void testWritable() throws Exception {
for (int i = 0; i < NUM_ITERATIONS; i++) {
String str;
if (i == 0)
str = getLongString();
else
str = getTestString();
TestWritable.testWritable(new Text(str));
}
}
public void testCoding() throws Exception {
String before = "Bad \t encoding \t testcase";
Text text = new Text(before);
String after = text.toString();
assertTrue(before.equals(after));
for (int i = 0; i < NUM_ITERATIONS; i++) {
// generate a random string
if (i == 0)
before = getLongString();
else
before = getTestString();
// test string to utf8
ByteBuffer bb = Text.encode(before);
byte[] utf8Text = bb.array();
byte[] utf8Java = before.getBytes("UTF-8");
assertEquals(0, WritableComparator.compareBytes(
utf8Text, 0, bb.limit(),
utf8Java, 0, utf8Java.length));
// test utf8 to string
after = Text.decode(utf8Java);
assertTrue(before.equals(after));
}
}
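  // Minimal usage sketch (illustrative): Text.encode yields a ByteBuffer
  // whose backing array holds UTF-8 bytes up to limit(); Text.decode is the
  // inverse. The literal is an arbitrary example.
  public void testEncodeDecodeSketch() throws Exception {
    ByteBuffer bb = Text.encode("hadoop");
    assertEquals("hadoop", Text.decode(bb.array(), 0, bb.limit()));
  }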
public void testIO() throws Exception {
DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer();
for (int i = 0; i < NUM_ITERATIONS; i++) {
// generate a random string
String before;
if (i == 0)
before = getLongString();
else
before = getTestString();
// write it
out.reset();
Text.writeString(out, before);
// test that it reads correctly
in.reset(out.getData(), out.getLength());
String after = Text.readString(in);
assertTrue(before.equals(after));
// Test compatibility with Java's other decoder
int strLenSize = WritableUtils.getVIntSize(Text.utf8Length(before));
String after2 = new String(out.getData(), strLenSize,
out.getLength()-strLenSize, "UTF-8");
assertTrue(before.equals(after2));
}
}
public void doTestLimitedIO(String str, int len) throws IOException {
DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer();
out.reset();
try {
Text.writeString(out, str, len);
fail("expected writeString to fail when told to write a string " +
"that was too long! The string was '" + str + "'");
    } catch (IOException e) {
      // expected: the string is longer than the allowed length
    }
Text.writeString(out, str, len + 1);
// test that it reads correctly
in.reset(out.getData(), out.getLength());
in.mark(len);
String after;
try {
after = Text.readString(in, len);
fail("expected readString to fail when told to read a string " +
"that was too long! The string was '" + str + "'");
    } catch (IOException e) {
      // expected: the stream holds more bytes than the allowed length
    }
in.reset();
after = Text.readString(in, len + 1);
assertTrue(str.equals(after));
}
public void testLimitedIO() throws Exception {
doTestLimitedIO("abcd", 3);
doTestLimitedIO("foo bar baz", 10);
doTestLimitedIO("1", 0);
}
public void testCompare() throws Exception {
DataOutputBuffer out1 = new DataOutputBuffer();
DataOutputBuffer out2 = new DataOutputBuffer();
DataOutputBuffer out3 = new DataOutputBuffer();
Text.Comparator comparator = new Text.Comparator();
for (int i=0; i<NUM_ITERATIONS; i++) {
// reset output buffer
out1.reset();
out2.reset();
out3.reset();
      // generate two random strings; use long strings on the first pass
      String str1;
      String str2;
      if (i == 0) {
        str1 = getLongString();
        str2 = getLongString();
      } else {
        str1 = getTestString();
        str2 = getTestString();
      }
// convert to texts
Text txt1 = new Text(str1);
Text txt2 = new Text(str2);
Text txt3 = new Text(str1);
// serialize them
txt1.write(out1);
txt2.write(out2);
txt3.write(out3);
// compare two strings by looking at their binary formats
int ret1 = comparator.compare(out1.getData(), 0, out1.getLength(),
out2.getData(), 0, out2.getLength());
// compare two strings
int ret2 = txt1.compareTo(txt2);
assertEquals(ret1, ret2);
assertEquals("Equivalence of different txt objects, same content" ,
0,
txt1.compareTo(txt3));
assertEquals("Equvalence of data output buffers",
0,
          comparator.compare(out1.getData(), 0, out1.getLength(),
out3.getData(), 0, out3.getLength()));
}
}
public void testFind() throws Exception {
Text text = new Text("abcd\u20acbdcd\u20ac");
assertTrue(text.find("abd")==-1);
assertTrue(text.find("ac")==-1);
assertTrue(text.find("\u20ac")==4);
assertTrue(text.find("\u20ac", 5)==11);
}
public void testFindAfterUpdatingContents() throws Exception {
Text text = new Text("abcd");
text.set("a".getBytes());
assertEquals(text.getLength(),1);
assertEquals(text.find("a"), 0);
assertEquals(text.find("b"), -1);
}
public void testValidate() throws Exception {
Text text = new Text("abcd\u20acbdcd\u20ac");
byte [] utf8 = text.getBytes();
int length = text.getLength();
Text.validateUTF8(utf8, 0, length);
}
public void testClear() throws Exception {
// Test lengths on an empty text object
Text text = new Text();
assertEquals(
"Actual string on an empty text object must be an empty string",
"", text.toString());
assertEquals("Underlying byte array length must be zero",
0, text.getBytes().length);
assertEquals("String's length must be zero",
0, text.getLength());
// Test if clear works as intended
text = new Text("abcd\u20acbdcd\u20ac");
int len = text.getLength();
text.clear();
assertEquals("String must be empty after clear()",
"", text.toString());
assertTrue(
"Length of the byte array must not decrease after clear()",
text.getBytes().length >= len);
assertEquals("Length of the string must be reset to 0 after clear()",
0, text.getLength());
}
public void testTextText() throws CharacterCodingException {
Text a=new Text("abc");
Text b=new Text("a");
b.set(a);
assertEquals("abc", b.toString());
a.append("xdefgxxx".getBytes(), 1, 4);
assertEquals("modified aliased string", "abc", b.toString());
assertEquals("appended string incorrectly", "abcdefg", a.toString());
// add an extra byte so that capacity = 14 and length = 8
a.append(new byte[]{'d'}, 0, 1);
assertEquals(14, a.getBytes().length);
assertEquals(8, a.copyBytes().length);
}
private class ConcurrentEncodeDecodeThread extends Thread {
public ConcurrentEncodeDecodeThread(String name) {
super(name);
}
@Override
public void run() {
final String name = this.getName();
DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer();
for (int i=0; i < 1000; ++i) {
try {
out.reset();
WritableUtils.writeString(out, name);
in.reset(out.getData(), out.getLength());
String s = WritableUtils.readString(in);
assertEquals("input buffer reset contents = " + name, name, s);
} catch (Exception ioe) {
throw new RuntimeException(ioe);
}
}
}
}
public void testConcurrentEncodeDecode() throws Exception{
Thread thread1 = new ConcurrentEncodeDecodeThread("apache");
Thread thread2 = new ConcurrentEncodeDecodeThread("hadoop");
thread1.start();
thread2.start();
    thread1.join();
    thread2.join();
}
public void testAvroReflect() throws Exception {
AvroTestUtil.testReflect
(new Text("foo"),
"{\"type\":\"string\",\"java-class\":\"org.apache.hadoop.io.Text\"}");
}
/**
*
*/
public void testCharAt() {
String line = "adsawseeeeegqewgasddga";
Text text = new Text(line);
for (int i = 0; i < line.length(); i++) {
assertTrue("testCharAt error1 !!!", text.charAt(i) == line.charAt(i));
}
assertEquals("testCharAt error2 !!!", -1, text.charAt(-1));
assertEquals("testCharAt error3 !!!", -1, text.charAt(100));
}
/**
* test {@code Text} readFields/write operations
*/
public void testReadWriteOperations() {
String line = "adsawseeeeegqewgasddga";
byte[] inputBytes = line.getBytes();
inputBytes = Bytes.concat(new byte[] {(byte)22}, inputBytes);
DataInputBuffer in = new DataInputBuffer();
DataOutputBuffer out = new DataOutputBuffer();
Text text = new Text(line);
try {
in.reset(inputBytes, inputBytes.length);
text.readFields(in);
} catch(Exception ex) {
fail("testReadFields error !!!");
}
try {
text.write(out);
    } catch(IOException ex) {
      // a checked IOException from write() is tolerated here; only
      // unexpected exception types fail the test
    } catch(Exception ex) {
fail("testReadWriteOperations error !!!");
}
}
public void testReadWithKnownLength() throws IOException {
String line = "hello world";
byte[] inputBytes = line.getBytes(Charsets.UTF_8);
DataInputBuffer in = new DataInputBuffer();
Text text = new Text();
in.reset(inputBytes, inputBytes.length);
text.readWithKnownLength(in, 5);
assertEquals("hello", text.toString());
// Read longer length, make sure it lengthens
in.reset(inputBytes, inputBytes.length);
text.readWithKnownLength(in, 7);
assertEquals("hello w", text.toString());
// Read shorter length, make sure it shortens
in.reset(inputBytes, inputBytes.length);
text.readWithKnownLength(in, 2);
assertEquals("he", text.toString());
}
/**
* test {@code Text.bytesToCodePoint(bytes) }
* with {@code BufferUnderflowException}
*
*/
public void testBytesToCodePoint() {
try {
ByteBuffer bytes = ByteBuffer.wrap(new byte[] {-2, 45, 23, 12, 76, 89});
Text.bytesToCodePoint(bytes);
assertTrue("testBytesToCodePoint error !!!", bytes.position() == 6 );
} catch (BufferUnderflowException ex) {
fail("testBytesToCodePoint unexp exception");
} catch (Exception e) {
fail("testBytesToCodePoint unexp exception");
}
}
public void testbytesToCodePointWithInvalidUTF() {
try {
Text.bytesToCodePoint(ByteBuffer.wrap(new byte[] {-2}));
fail("testbytesToCodePointWithInvalidUTF error unexp exception !!!");
    } catch (BufferUnderflowException ex) {
      // expected for truncated UTF-8 input
    } catch(Exception e) {
fail("testbytesToCodePointWithInvalidUTF error unexp exception !!!");
}
}
public void testUtf8Length() {
assertEquals("testUtf8Length1 error !!!",
1, Text.utf8Length(new String(new char[]{(char)1})));
assertEquals("testUtf8Length127 error !!!",
1, Text.utf8Length(new String(new char[]{(char)127})));
assertEquals("testUtf8Length128 error !!!",
2, Text.utf8Length(new String(new char[]{(char)128})));
assertEquals("testUtf8Length193 error !!!",
2, Text.utf8Length(new String(new char[]{(char)193})));
assertEquals("testUtf8Length225 error !!!",
2, Text.utf8Length(new String(new char[]{(char)225})));
assertEquals("testUtf8Length254 error !!!",
2, Text.utf8Length(new String(new char[]{(char)254})));
}
public static void main(String[] args) throws Exception
{
TestText test = new TestText("main");
test.testIO();
test.testCompare();
test.testCoding();
test.testWritable();
test.testFind();
test.testValidate();
}
}
| 14,511 | 31.832579 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFileAppend.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SequenceFile.Reader;
import org.apache.hadoop.io.SequenceFile.Writer;
import org.apache.hadoop.io.SequenceFile.Writer.Option;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.io.serializer.JavaSerializationComparator;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestSequenceFileAppend {
private static Configuration conf;
private static FileSystem fs;
private static Path ROOT_PATH = new Path(System.getProperty(
"test.build.data", "build/test/data"));
@BeforeClass
public static void setUp() throws Exception {
conf = new Configuration();
conf.set("io.serializations",
"org.apache.hadoop.io.serializer.JavaSerialization");
conf.set("fs.file.impl", "org.apache.hadoop.fs.RawLocalFileSystem");
fs = FileSystem.get(conf);
}
@AfterClass
public static void tearDown() throws Exception {
fs.close();
}
@Test(timeout = 30000)
public void testAppend() throws Exception {
Path file = new Path(ROOT_PATH, "testseqappend.seq");
fs.delete(file, true);
Text key1 = new Text("Key1");
Text value1 = new Text("Value1");
Text value2 = new Text("Updated");
SequenceFile.Metadata metadata = new SequenceFile.Metadata();
metadata.set(key1, value1);
Writer.Option metadataOption = Writer.metadata(metadata);
Writer writer = SequenceFile.createWriter(conf,
SequenceFile.Writer.file(file),
SequenceFile.Writer.keyClass(Long.class),
SequenceFile.Writer.valueClass(String.class), metadataOption);
writer.append(1L, "one");
writer.append(2L, "two");
writer.close();
verify2Values(file);
metadata.set(key1, value2);
writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
SequenceFile.Writer.keyClass(Long.class),
SequenceFile.Writer.valueClass(String.class),
SequenceFile.Writer.appendIfExists(true), metadataOption);
// Verify the Meta data is not changed
assertEquals(value1, writer.metadata.get(key1));
writer.append(3L, "three");
writer.append(4L, "four");
writer.close();
verifyAll4Values(file);
// Verify the Meta data readable after append
Reader reader = new Reader(conf, Reader.file(file));
assertEquals(value1, reader.getMetadata().get(key1));
reader.close();
// Verify failure if the compression details are different
try {
Option wrongCompressOption = Writer.compression(CompressionType.RECORD,
new GzipCodec());
writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
SequenceFile.Writer.keyClass(Long.class),
SequenceFile.Writer.valueClass(String.class),
SequenceFile.Writer.appendIfExists(true), wrongCompressOption);
writer.close();
fail("Expected IllegalArgumentException for compression options");
} catch (IllegalArgumentException IAE) {
// Expected exception. Ignore it
}
try {
Option wrongCompressOption = Writer.compression(CompressionType.BLOCK,
new DefaultCodec());
writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
SequenceFile.Writer.keyClass(Long.class),
SequenceFile.Writer.valueClass(String.class),
SequenceFile.Writer.appendIfExists(true), wrongCompressOption);
writer.close();
fail("Expected IllegalArgumentException for compression options");
} catch (IllegalArgumentException IAE) {
// Expected exception. Ignore it
}
fs.deleteOnExit(file);
}
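  // Essence of the append flow exercised above (illustrative): a writer
  // opened with appendIfExists(true) on an existing file must supply
  // compression options compatible with that file, or creation fails with
  // IllegalArgumentException:
  //   Writer w = SequenceFile.createWriter(conf,
  //       SequenceFile.Writer.file(file),
  //       SequenceFile.Writer.keyClass(Long.class),
  //       SequenceFile.Writer.valueClass(String.class),
  //       SequenceFile.Writer.appendIfExists(true));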
@Test(timeout = 30000)
public void testAppendRecordCompression() throws Exception {
GenericTestUtils.assumeInNativeProfile();
Path file = new Path(ROOT_PATH, "testseqappendblockcompr.seq");
fs.delete(file, true);
Option compressOption = Writer.compression(CompressionType.RECORD,
new GzipCodec());
Writer writer = SequenceFile.createWriter(conf,
SequenceFile.Writer.file(file),
SequenceFile.Writer.keyClass(Long.class),
SequenceFile.Writer.valueClass(String.class), compressOption);
writer.append(1L, "one");
writer.append(2L, "two");
writer.close();
verify2Values(file);
writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
SequenceFile.Writer.keyClass(Long.class),
SequenceFile.Writer.valueClass(String.class),
SequenceFile.Writer.appendIfExists(true), compressOption);
writer.append(3L, "three");
writer.append(4L, "four");
writer.close();
verifyAll4Values(file);
fs.deleteOnExit(file);
}
@Test(timeout = 30000)
public void testAppendBlockCompression() throws Exception {
GenericTestUtils.assumeInNativeProfile();
Path file = new Path(ROOT_PATH, "testseqappendblockcompr.seq");
fs.delete(file, true);
Option compressOption = Writer.compression(CompressionType.BLOCK,
new GzipCodec());
Writer writer = SequenceFile.createWriter(conf,
SequenceFile.Writer.file(file),
SequenceFile.Writer.keyClass(Long.class),
SequenceFile.Writer.valueClass(String.class), compressOption);
writer.append(1L, "one");
writer.append(2L, "two");
writer.close();
verify2Values(file);
writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
SequenceFile.Writer.keyClass(Long.class),
SequenceFile.Writer.valueClass(String.class),
SequenceFile.Writer.appendIfExists(true), compressOption);
writer.append(3L, "three");
writer.append(4L, "four");
writer.close();
verifyAll4Values(file);
// Verify failure if the compression details are different or not Provided
try {
writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
SequenceFile.Writer.keyClass(Long.class),
SequenceFile.Writer.valueClass(String.class),
SequenceFile.Writer.appendIfExists(true));
writer.close();
fail("Expected IllegalArgumentException for compression options");
} catch (IllegalArgumentException IAE) {
// Expected exception. Ignore it
}
// Verify failure if the compression details are different
try {
Option wrongCompressOption = Writer.compression(CompressionType.RECORD,
new GzipCodec());
writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
SequenceFile.Writer.keyClass(Long.class),
SequenceFile.Writer.valueClass(String.class),
SequenceFile.Writer.appendIfExists(true), wrongCompressOption);
writer.close();
fail("Expected IllegalArgumentException for compression options");
} catch (IllegalArgumentException IAE) {
// Expected exception. Ignore it
}
try {
Option wrongCompressOption = Writer.compression(CompressionType.BLOCK,
new DefaultCodec());
writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
SequenceFile.Writer.keyClass(Long.class),
SequenceFile.Writer.valueClass(String.class),
SequenceFile.Writer.appendIfExists(true), wrongCompressOption);
writer.close();
fail("Expected IllegalArgumentException for compression options");
} catch (IllegalArgumentException IAE) {
// Expected exception. Ignore it
}
fs.deleteOnExit(file);
}
@Test(timeout = 30000)
public void testAppendSort() throws Exception {
GenericTestUtils.assumeInNativeProfile();
Path file = new Path(ROOT_PATH, "testseqappendSort.seq");
fs.delete(file, true);
Path sortedFile = new Path(ROOT_PATH, "testseqappendSort.seq.sort");
fs.delete(sortedFile, true);
SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs,
new JavaSerializationComparator<Long>(), Long.class, String.class, conf);
Option compressOption = Writer.compression(CompressionType.BLOCK,
new GzipCodec());
Writer writer = SequenceFile.createWriter(conf,
SequenceFile.Writer.file(file),
SequenceFile.Writer.keyClass(Long.class),
SequenceFile.Writer.valueClass(String.class), compressOption);
writer.append(2L, "two");
writer.append(1L, "one");
writer.close();
writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(file),
SequenceFile.Writer.keyClass(Long.class),
SequenceFile.Writer.valueClass(String.class),
SequenceFile.Writer.appendIfExists(true), compressOption);
writer.append(4L, "four");
writer.append(3L, "three");
writer.close();
// Sort file after append
sorter.sort(file, sortedFile);
verifyAll4Values(sortedFile);
fs.deleteOnExit(file);
fs.deleteOnExit(sortedFile);
}
private void verify2Values(Path file) throws IOException {
Reader reader = new Reader(conf, Reader.file(file));
assertEquals(1L, reader.next((Object) null));
assertEquals("one", reader.getCurrentValue((Object) null));
assertEquals(2L, reader.next((Object) null));
assertEquals("two", reader.getCurrentValue((Object) null));
assertNull(reader.next((Object) null));
reader.close();
}
private void verifyAll4Values(Path file) throws IOException {
Reader reader = new Reader(conf, Reader.file(file));
assertEquals(1L, reader.next((Object) null));
assertEquals("one", reader.getCurrentValue((Object) null));
assertEquals(2L, reader.next((Object) null));
assertEquals("two", reader.getCurrentValue((Object) null));
assertEquals(3L, reader.next((Object) null));
assertEquals("three", reader.getCurrentValue((Object) null));
assertEquals(4L, reader.next((Object) null));
assertEquals("four", reader.getCurrentValue((Object) null));
assertNull(reader.next((Object) null));
reader.close();
}
}
| 11,093 | 33.996845 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBytesWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
/**
* This is the unit test for BytesWritable.
*/
public class TestBytesWritable {
@Test
public void testSizeChange() throws Exception {
byte[] hadoop = "hadoop".getBytes();
BytesWritable buf = new BytesWritable(hadoop);
int size = buf.getLength();
int orig_capacity = buf.getCapacity();
buf.setSize(size*2);
int new_capacity = buf.getCapacity();
System.arraycopy(buf.getBytes(), 0, buf.getBytes(), size, size);
assertTrue(new_capacity >= size * 2);
assertEquals(size * 2, buf.getLength());
assertTrue(new_capacity != orig_capacity);
buf.setSize(size*4);
assertTrue(new_capacity != buf.getCapacity());
for(int i=0; i < size*2; ++i) {
assertEquals(hadoop[i%size], buf.getBytes()[i]);
}
// ensure that copyBytes is exactly the right length
assertEquals(size*4, buf.copyBytes().length);
// shrink the buffer
buf.setCapacity(1);
// make sure the size has been cut down too
assertEquals(1, buf.getLength());
// but that the data is still there
assertEquals(hadoop[0], buf.getBytes()[0]);
}
@Test
public void testHash() throws Exception {
byte[] owen = "owen".getBytes();
BytesWritable buf = new BytesWritable(owen);
assertEquals(4347922, buf.hashCode());
buf.setCapacity(10000);
assertEquals(4347922, buf.hashCode());
buf.setSize(0);
assertEquals(1, buf.hashCode());
}
@Test
public void testCompare() throws Exception {
byte[][] values = new byte[][]{"abc".getBytes(),
"ad".getBytes(),
"abcd".getBytes(),
"".getBytes(),
"b".getBytes()};
BytesWritable[] buf = new BytesWritable[values.length];
for(int i=0; i < values.length; ++i) {
buf[i] = new BytesWritable(values[i]);
}
    // check to make sure the compare function is symmetric and reflexive
for(int i=0; i < values.length; ++i) {
for(int j=0; j < values.length; ++j) {
assertTrue(buf[i].compareTo(buf[j]) == -buf[j].compareTo(buf[i]));
assertTrue((i == j) == (buf[i].compareTo(buf[j]) == 0));
}
}
assertTrue(buf[0].compareTo(buf[1]) < 0);
assertTrue(buf[1].compareTo(buf[2]) > 0);
assertTrue(buf[2].compareTo(buf[3]) > 0);
assertTrue(buf[3].compareTo(buf[4]) < 0);
}
private void checkToString(byte[] input, String expected) {
String actual = new BytesWritable(input).toString();
assertEquals(expected, actual);
}
@Test
public void testToString() {
checkToString(new byte[]{0,1,2,0x10}, "00 01 02 10");
checkToString(new byte[]{-0x80, -0x7f, -0x1, -0x2, 1, 0},
"80 81 ff fe 01 00");
}
/**
* This test was written as result of adding the new zero
* copy constructor and set method to BytesWritable. These
* methods allow users to specify the backing buffer of the
* BytesWritable instance and a length.
*/
@Test
public void testZeroCopy() {
byte[] bytes = "brock".getBytes();
BytesWritable zeroBuf = new BytesWritable(bytes, bytes.length); // new
BytesWritable copyBuf = new BytesWritable(bytes); // old
// using zero copy constructor shouldn't result in a copy
assertTrue("copy took place, backing array != array passed to constructor",
bytes == zeroBuf.getBytes());
assertTrue("length of BW should backing byte array", zeroBuf.getLength() == bytes.length);
assertEquals("objects with same backing array should be equal", zeroBuf, copyBuf);
assertEquals("string repr of objects with same backing array should be equal",
zeroBuf.toString(), copyBuf.toString());
assertTrue("compare order objects with same backing array should be equal",
zeroBuf.compareTo(copyBuf) == 0);
assertTrue("hash of objects with same backing array should be equal",
zeroBuf.hashCode() == copyBuf.hashCode());
// ensure expanding buffer is handled correctly
// for buffers created with zero copy api
byte[] buffer = new byte[bytes.length * 5];
zeroBuf.set(buffer, 0, buffer.length); // expand internal buffer
zeroBuf.set(bytes, 0, bytes.length); // set back to normal contents
assertEquals("buffer created with (array, len) has bad contents",
zeroBuf, copyBuf);
assertTrue("buffer created with (array, len) has bad length",
zeroBuf.getLength() == copyBuf.getLength());
}
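  // Zero-copy usage in brief (illustrative): the (array, length) constructor
  // adopts the caller's array as the backing buffer instead of copying it:
  //   byte[] data = "brock".getBytes();
  //   BytesWritable bw = new BytesWritable(data, data.length);
  //   assert bw.getBytes() == data;  // same array, no copy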
/**
* test {@link ByteWritable}
* methods compareTo(), toString(), equals()
*/
@Test
public void testObjectCommonMethods() {
byte b = 0x9;
ByteWritable bw = new ByteWritable();
bw.set(b);
assertTrue("testSetByteWritable error", bw.get() == b);
assertTrue("testSetByteWritable error < 0", bw.compareTo(new ByteWritable((byte)0xA)) < 0);
assertTrue("testSetByteWritable error > 0", bw.compareTo(new ByteWritable((byte)0x8)) > 0);
assertTrue("testSetByteWritable error == 0", bw.compareTo(new ByteWritable((byte)0x9)) == 0);
assertTrue("testSetByteWritable equals error !!!", bw.equals(new ByteWritable((byte)0x9)));
assertTrue("testSetByteWritable equals error !!!", ! bw.equals(new ByteWritable((byte)0xA)));
assertTrue("testSetByteWritable equals error !!!", ! bw.equals(new IntWritable(1)));
assertEquals("testSetByteWritable error ", "9", bw.toString());
}
}
| 6,344 | 39.414013 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFileSync.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
public class TestSequenceFileSync {
private static final int NUMRECORDS = 2000;
private static final int RECORDSIZE = 80;
private static final Random rand = new Random();
private final static String REC_FMT = "%d RECORDID %d : ";
private static void forOffset(SequenceFile.Reader reader,
IntWritable key, Text val, int iter, long off, int expectedRecord)
throws IOException {
val.clear();
reader.sync(off);
reader.next(key, val);
assertEquals(key.get(), expectedRecord);
final String test = String.format(REC_FMT, expectedRecord, expectedRecord);
assertEquals("Invalid value " + val, 0, val.find(test, 0));
}
@Test
public void testLowSyncpoint() throws IOException {
final Configuration conf = new Configuration();
final FileSystem fs = FileSystem.getLocal(conf);
final Path path = new Path(System.getProperty("test.build.data", "/tmp"),
"sequencefile.sync.test");
final IntWritable input = new IntWritable();
final Text val = new Text();
SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf, path,
IntWritable.class, Text.class);
try {
writeSequenceFile(writer, NUMRECORDS);
for (int i = 0; i < 5 ; i++) {
final SequenceFile.Reader reader;
//try different SequenceFile.Reader constructors
if (i % 2 == 0) {
reader = new SequenceFile.Reader(fs, path, conf);
} else {
final FSDataInputStream in = fs.open(path);
final long length = fs.getFileStatus(path).getLen();
final int buffersize = conf.getInt("io.file.buffer.size", 4096);
reader = new SequenceFile.Reader(in, buffersize, 0L, length, conf);
}
try {
forOffset(reader, input, val, i, 0, 0);
forOffset(reader, input, val, i, 65, 0);
forOffset(reader, input, val, i, 2000, 21);
forOffset(reader, input, val, i, 0, 0);
} finally {
reader.close();
}
}
} finally {
fs.delete(path, false);
}
}
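  // Note on the offsets above (illustrative): reader.sync(off) positions the
  // reader at the first sync marker at or after off, so offsets 0 and 65
  // both resolve to record 0 while offset 2000 lands on record 21 for this
  // file layout.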
public static void writeSequenceFile(SequenceFile.Writer writer,
int numRecords) throws IOException {
final IntWritable key = new IntWritable();
final Text val = new Text();
for (int numWritten = 0; numWritten < numRecords; ++numWritten) {
key.set(numWritten);
randomText(val, numWritten, RECORDSIZE);
writer.append(key, val);
}
writer.close();
}
static void randomText(Text val, int id, int recordSize) {
val.clear();
final StringBuilder ret = new StringBuilder(recordSize);
ret.append(String.format(REC_FMT, id, id));
recordSize -= ret.length();
for (int i = 0; i < recordSize; ++i) {
ret.append(rand.nextInt(9));
}
val.set(ret.toString());
}
}
| 3,909 | 33.60177 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;
import junit.framework.TestCase;
/** Unit tests for Writable. */
public class TestWritable extends TestCase {
private static final String TEST_CONFIG_PARAM = "frob.test";
private static final String TEST_CONFIG_VALUE = "test";
private static final String TEST_WRITABLE_CONFIG_PARAM = "test.writable";
private static final String TEST_WRITABLE_CONFIG_VALUE = TEST_CONFIG_VALUE;
public TestWritable(String name) { super(name); }
/** Example class used in test cases below. */
public static class SimpleWritable implements Writable {
private static final Random RANDOM = new Random();
int state = RANDOM.nextInt();
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(state);
}
@Override
public void readFields(DataInput in) throws IOException {
this.state = in.readInt();
}
public static SimpleWritable read(DataInput in) throws IOException {
SimpleWritable result = new SimpleWritable();
result.readFields(in);
return result;
}
/** Required by test code, below. */
@Override
public boolean equals(Object o) {
if (!(o instanceof SimpleWritable))
return false;
SimpleWritable other = (SimpleWritable)o;
return this.state == other.state;
}
}
public static class SimpleWritableComparable extends SimpleWritable
implements WritableComparable<SimpleWritableComparable>, Configurable {
private Configuration conf;
public SimpleWritableComparable() {}
public void setConf(Configuration conf) {
this.conf = conf;
}
public Configuration getConf() {
return this.conf;
}
public int compareTo(SimpleWritableComparable o) {
return this.state - o.state;
}
}
  /** Test that a SimpleWritable survives a write/read round trip. */
public void testSimpleWritable() throws Exception {
testWritable(new SimpleWritable());
}
public void testByteWritable() throws Exception {
testWritable(new ByteWritable((byte)128));
}
public void testShortWritable() throws Exception {
    testWritable(new ShortWritable((short)256));
}
public void testDoubleWritable() throws Exception {
testWritable(new DoubleWritable(1.0));
}
/** Utility method for testing writables. */
public static Writable testWritable(Writable before)
throws Exception {
return testWritable(before, null);
}
/** Utility method for testing writables. */
public static Writable testWritable(Writable before
, Configuration conf) throws Exception {
DataOutputBuffer dob = new DataOutputBuffer();
before.write(dob);
DataInputBuffer dib = new DataInputBuffer();
dib.reset(dob.getData(), dob.getLength());
Writable after = (Writable)ReflectionUtils.newInstance(
before.getClass(), conf);
after.readFields(dib);
assertEquals(before, after);
return after;
}
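  /*
   * Usage sketch for the helper above (MyWritable is hypothetical; any
   * Writable with a public no-arg constructor works, since
   * ReflectionUtils.newInstance instantiates the class reflectively):
   *
   *   MyWritable before = new MyWritable();
   *   MyWritable after = (MyWritable) testWritable(before);
   *
   * The round trip stays in memory via DataOutputBuffer/DataInputBuffer,
   * and the final assertEquals relies on the class overriding equals().
   */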
private static class FrobComparator extends WritableComparator {
public FrobComparator() { super(Frob.class); }
@Override public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
return 0;
}
}
private static class Frob implements WritableComparable<Frob> {
static { // register default comparator
WritableComparator.define(Frob.class, new FrobComparator());
}
@Override public void write(DataOutput out) throws IOException {}
@Override public void readFields(DataInput in) throws IOException {}
@Override public int compareTo(Frob o) { return 0; }
}
/** Test that comparator is defined and configured. */
public static void testGetComparator() throws Exception {
Configuration conf = new Configuration();
// Without conf.
WritableComparator frobComparator = WritableComparator.get(Frob.class);
assert(frobComparator instanceof FrobComparator);
assertNotNull(frobComparator.getConf());
assertNull(frobComparator.getConf().get(TEST_CONFIG_PARAM));
// With conf.
conf.set(TEST_CONFIG_PARAM, TEST_CONFIG_VALUE);
frobComparator = WritableComparator.get(Frob.class, conf);
assert(frobComparator instanceof FrobComparator);
assertNotNull(frobComparator.getConf());
assertEquals(conf.get(TEST_CONFIG_PARAM), TEST_CONFIG_VALUE);
    // Without conf: should reuse the configuration from the previous call.
frobComparator = WritableComparator.get(Frob.class);
assert(frobComparator instanceof FrobComparator);
assertNotNull(frobComparator.getConf());
assertEquals(conf.get(TEST_CONFIG_PARAM), TEST_CONFIG_VALUE);
    // With a new conf: should use the new configuration.
frobComparator = WritableComparator.get(Frob.class, new Configuration());
assert(frobComparator instanceof FrobComparator);
assertNotNull(frobComparator.getConf());
assertNull(frobComparator.getConf().get(TEST_CONFIG_PARAM));
}
/**
* Test a user comparator that relies on deserializing both arguments for each
* compare.
*/
public void testShortWritableComparator() throws Exception {
ShortWritable writable1 = new ShortWritable((short)256);
ShortWritable writable2 = new ShortWritable((short) 128);
ShortWritable writable3 = new ShortWritable((short) 256);
final String SHOULD_NOT_MATCH_WITH_RESULT_ONE = "Result should be 1, should not match the writables";
assertTrue(SHOULD_NOT_MATCH_WITH_RESULT_ONE,
writable1.compareTo(writable2) == 1);
assertTrue(SHOULD_NOT_MATCH_WITH_RESULT_ONE, WritableComparator.get(
ShortWritable.class).compare(writable1, writable2) == 1);
final String SHOULD_NOT_MATCH_WITH_RESULT_MINUS_ONE = "Result should be -1, should not match the writables";
assertTrue(SHOULD_NOT_MATCH_WITH_RESULT_MINUS_ONE, writable2
.compareTo(writable1) == -1);
assertTrue(SHOULD_NOT_MATCH_WITH_RESULT_MINUS_ONE, WritableComparator.get(
ShortWritable.class).compare(writable2, writable1) == -1);
final String SHOULD_MATCH = "Result should be 0, should match the writables";
assertTrue(SHOULD_MATCH, writable1.compareTo(writable1) == 0);
assertTrue(SHOULD_MATCH, WritableComparator.get(ShortWritable.class)
.compare(writable1, writable3) == 0);
}
/**
   * Test that Writables are configured by the comparator.
*/
public void testConfigurableWritableComparator() throws Exception {
Configuration conf = new Configuration();
conf.set(TEST_WRITABLE_CONFIG_PARAM, TEST_WRITABLE_CONFIG_VALUE);
WritableComparator wc = WritableComparator.get(SimpleWritableComparable.class, conf);
SimpleWritableComparable key = ((SimpleWritableComparable)wc.newKey());
assertNotNull(wc.getConf());
assertNotNull(key.getConf());
assertEquals(key.getConf().get(TEST_WRITABLE_CONFIG_PARAM), TEST_WRITABLE_CONFIG_VALUE);
}
}
| 7,878 | 34.813636 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.conf.*;
/** Unit tests for ArrayFile. */
public class TestArrayFile extends TestCase {
private static final Log LOG = LogFactory.getLog(TestArrayFile.class);
private static final Path TEST_DIR = new Path(
System.getProperty("test.build.data", "/tmp"),
      TestArrayFile.class.getSimpleName());
private static String TEST_FILE = new Path(TEST_DIR, "test.array").toString();
public TestArrayFile(String name) {
super(name);
}
public void testArrayFile() throws Exception {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
RandomDatum[] data = generate(10000);
writeTest(fs, data, TEST_FILE);
readTest(fs, data, TEST_FILE, conf);
}
public void testEmptyFile() throws Exception {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
writeTest(fs, new RandomDatum[0], TEST_FILE);
ArrayFile.Reader reader = new ArrayFile.Reader(fs, TEST_FILE, conf);
assertNull(reader.get(0, new RandomDatum()));
reader.close();
}
private static RandomDatum[] generate(int count) {
if(LOG.isDebugEnabled()) {
LOG.debug("generating " + count + " records in debug");
}
RandomDatum[] data = new RandomDatum[count];
RandomDatum.Generator generator = new RandomDatum.Generator();
for (int i = 0; i < count; i++) {
generator.next();
data[i] = generator.getValue();
}
return data;
}
private static void writeTest(FileSystem fs, RandomDatum[] data, String file)
throws IOException {
Configuration conf = new Configuration();
MapFile.delete(fs, file);
if(LOG.isDebugEnabled()) {
LOG.debug("creating with " + data.length + " debug");
}
ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs, file, RandomDatum.class);
writer.setIndexInterval(100);
for (int i = 0; i < data.length; i++)
writer.append(data[i]);
writer.close();
}
private static void readTest(FileSystem fs, RandomDatum[] data, String file, Configuration conf)
throws IOException {
RandomDatum v = new RandomDatum();
if(LOG.isDebugEnabled()) {
LOG.debug("reading " + data.length + " debug");
}
ArrayFile.Reader reader = new ArrayFile.Reader(fs, file, conf);
try {
for (int i = 0; i < data.length; i++) { // try forwards
reader.get(i, v);
if (!v.equals(data[i])) {
throw new RuntimeException("wrong value at " + i);
}
}
for (int i = data.length-1; i >= 0; i--) { // then backwards
reader.get(i, v);
if (!v.equals(data[i])) {
throw new RuntimeException("wrong value at " + i);
}
}
if(LOG.isDebugEnabled()) {
LOG.debug("done reading " + data.length + " debug");
}
} finally {
reader.close();
}
}
/**
* test on {@link ArrayFile.Reader} iteration methods
* <pre>
* {@code next(), seek()} in and out of range.
* </pre>
*/
public void testArrayFileIteration() {
int SIZE = 10;
Configuration conf = new Configuration();
try {
FileSystem fs = FileSystem.get(conf);
ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs, TEST_FILE,
LongWritable.class, CompressionType.RECORD, defaultProgressable);
assertNotNull("testArrayFileIteration error !!!", writer);
for (int i = 0; i < SIZE; i++)
writer.append(new LongWritable(i));
writer.close();
ArrayFile.Reader reader = new ArrayFile.Reader(fs, TEST_FILE, conf);
LongWritable nextWritable = new LongWritable(0);
for (int i = 0; i < SIZE; i++) {
nextWritable = (LongWritable)reader.next(nextWritable);
        assertEquals(i, nextWritable.get());
}
assertTrue("testArrayFileIteration seek error !!!",
reader.seek(new LongWritable(6)));
nextWritable = (LongWritable) reader.next(nextWritable);
assertTrue("testArrayFileIteration error !!!", reader.key() == 7);
assertTrue("testArrayFileIteration error !!!",
nextWritable.equals(new LongWritable(7)));
assertFalse("testArrayFileIteration error !!!",
reader.seek(new LongWritable(SIZE + 5)));
reader.close();
} catch (Exception ex) {
fail("testArrayFileWriterConstruction error !!!");
}
}
/** For debugging and testing. */
public static void main(String[] args) throws Exception {
int count = 1024 * 1024;
boolean create = true;
boolean check = true;
String file = TEST_FILE;
String usage = "Usage: TestArrayFile [-count N] [-nocreate] [-nocheck] file";
if (args.length == 0) {
System.err.println(usage);
System.exit(-1);
}
Configuration conf = new Configuration();
int i = 0;
Path fpath = null;
FileSystem fs = null;
try {
for (; i < args.length; i++) { // parse command line
if (args[i] == null) {
continue;
} else if (args[i].equals("-count")) {
count = Integer.parseInt(args[++i]);
} else if (args[i].equals("-nocreate")) {
create = false;
} else if (args[i].equals("-nocheck")) {
check = false;
} else {
// file is required parameter
file = args[i];
fpath=new Path(file);
}
}
fs = fpath.getFileSystem(conf);
LOG.info("count = " + count);
LOG.info("create = " + create);
LOG.info("check = " + check);
LOG.info("file = " + file);
RandomDatum[] data = generate(count);
if (create) {
writeTest(fs, data, file);
}
if (check) {
readTest(fs, data, file, conf);
}
} finally {
fs.close();
}
}
private static final Progressable defaultProgressable = new Progressable() {
@Override
public void progress() {
}
};
}
| 7,086 | 31.213636 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestDefaultStringifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
public class TestDefaultStringifier extends TestCase {
private static Configuration conf = new Configuration();
private static final Log LOG = LogFactory.getLog(TestDefaultStringifier.class);
private char[] alphabet = "abcdefghijklmnopqrstuvwxyz".toCharArray();
public void testWithWritable() throws Exception {
conf.set("io.serializations", "org.apache.hadoop.io.serializer.WritableSerialization");
LOG.info("Testing DefaultStringifier with Text");
Random random = new Random();
//test with a Text
for(int i=0;i<10;i++) {
//generate a random string
StringBuilder builder = new StringBuilder();
int strLen = random.nextInt(40);
for(int j=0; j< strLen; j++) {
builder.append(alphabet[random.nextInt(alphabet.length)]);
}
Text text = new Text(builder.toString());
DefaultStringifier<Text> stringifier = new DefaultStringifier<Text>(conf, Text.class);
String str = stringifier.toString(text);
Text claimedText = stringifier.fromString(str);
LOG.info("Object: " + text);
LOG.info("String representation of the object: " + str);
assertEquals(text, claimedText);
}
}
public void testWithJavaSerialization() throws Exception {
conf.set("io.serializations", "org.apache.hadoop.io.serializer.JavaSerialization");
LOG.info("Testing DefaultStringifier with Serializable Integer");
//Integer implements Serializable
Integer testInt = Integer.valueOf(42);
DefaultStringifier<Integer> stringifier = new DefaultStringifier<Integer>(conf, Integer.class);
String str = stringifier.toString(testInt);
Integer claimedInt = stringifier.fromString(str);
LOG.info("String representation of the object: " + str);
assertEquals(testInt, claimedInt);
}
public void testStoreLoad() throws IOException {
LOG.info("Testing DefaultStringifier#store() and #load()");
conf.set("io.serializations", "org.apache.hadoop.io.serializer.WritableSerialization");
Text text = new Text("uninteresting test string");
String keyName = "test.defaultstringifier.key1";
DefaultStringifier.store(conf,text, keyName);
Text claimedText = DefaultStringifier.load(conf, keyName, Text.class);
assertEquals("DefaultStringifier#load() or #store() might be flawed"
, text, claimedText);
}
public void testStoreLoadArray() throws IOException {
LOG.info("Testing DefaultStringifier#storeArray() and #loadArray()");
conf.set("io.serializations", "org.apache.hadoop.io.serializer.JavaSerialization");
String keyName = "test.defaultstringifier.key2";
Integer[] array = new Integer[] {1,2,3,4,5};
DefaultStringifier.storeArray(conf, array, keyName);
Integer[] claimedArray = DefaultStringifier.<Integer>loadArray(conf, keyName, Integer.class);
for (int i = 0; i < array.length; i++) {
assertEquals("two arrays are not equal", array[i], claimedArray[i]);
}
}
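  /*
   * The pattern exercised by these tests, as a sketch ("my.key" is an
   * arbitrary property name; the string encoding is an implementation
   * detail of DefaultStringifier that the tests do not assert):
   *
   *   DefaultStringifier.store(conf, value, "my.key");   // object -> conf entry
   *   MyType v = DefaultStringifier.load(conf, "my.key", MyType.class);
   *
   * Both directions need a serializer for MyType registered under
   * "io.serializations", which is why each test sets that property first.
   */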
}
| 4,019 | 34.263158 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.util.Map;
import junit.framework.TestCase;
/**
* Tests MapWritable
*/
public class TestMapWritable extends TestCase {
/** the test */
@SuppressWarnings("unchecked")
public void testMapWritable() {
Text[] keys = {
new Text("key1"),
new Text("key2"),
new Text("Key3"),
};
BytesWritable[] values = {
new BytesWritable("value1".getBytes()),
new BytesWritable("value2".getBytes()),
new BytesWritable("value3".getBytes())
};
MapWritable inMap = new MapWritable();
for (int i = 0; i < keys.length; i++) {
inMap.put(keys[i], values[i]);
}
MapWritable outMap = new MapWritable(inMap);
assertEquals(inMap.size(), outMap.size());
for (Map.Entry<Writable, Writable> e: inMap.entrySet()) {
assertTrue(outMap.containsKey(e.getKey()));
assertEquals(0, ((WritableComparable) outMap.get(e.getKey())).compareTo(
e.getValue()));
}
// Now for something a little harder...
Text[] maps = {
new Text("map1"),
new Text("map2")
};
MapWritable mapOfMaps = new MapWritable();
mapOfMaps.put(maps[0], inMap);
mapOfMaps.put(maps[1], outMap);
MapWritable copyOfMapOfMaps = new MapWritable(mapOfMaps);
for (int i = 0; i < maps.length; i++) {
assertTrue(copyOfMapOfMaps.containsKey(maps[i]));
MapWritable a = (MapWritable) mapOfMaps.get(maps[i]);
MapWritable b = (MapWritable) copyOfMapOfMaps.get(maps[i]);
assertEquals(a.size(), b.size());
for (Writable key: a.keySet()) {
assertTrue(b.containsKey(key));
// This will work because we know what we put into each set
WritableComparable aValue = (WritableComparable) a.get(key);
WritableComparable bValue = (WritableComparable) b.get(key);
assertEquals(0, aValue.compareTo(bValue));
}
}
}
/**
* Test that number of "unknown" classes is propagated across multiple copies.
*/
@SuppressWarnings("deprecation")
public void testForeignClass() {
MapWritable inMap = new MapWritable();
inMap.put(new Text("key"), new UTF8("value"));
inMap.put(new Text("key2"), new UTF8("value2"));
MapWritable outMap = new MapWritable(inMap);
MapWritable copyOfCopy = new MapWritable(outMap);
assertEquals(1, copyOfCopy.getNewClasses());
}
/**
* Assert MapWritable does not grow across calls to readFields.
* @throws Exception
* @see <a href="https://issues.apache.org/jira/browse/HADOOP-2244">HADOOP-2244</a>
*/
public void testMultipleCallsToReadFieldsAreSafe() throws Exception {
// Create an instance and add a key/value.
MapWritable m = new MapWritable();
final Text t = new Text(getName());
m.put(t, t);
// Get current size of map. Key values are 't'.
int count = m.size();
// Now serialize... save off the bytes.
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos);
m.write(dos);
dos.close();
// Now add new values to the MapWritable.
m.put(new Text("key1"), new Text("value1"));
m.put(new Text("key2"), new Text("value2"));
// Now deserialize the original MapWritable. Ensure count and key values
// match original state.
ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
DataInputStream dis = new DataInputStream(bais);
m.readFields(dis);
assertEquals(count, m.size());
assertTrue(m.get(t).equals(t));
dis.close();
}
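  /*
   * The guarantee tested above depends on readFields() clearing old state
   * before repopulating. A sketch of the expected shape (not the actual
   * MapWritable source):
   *
   *   public void readFields(DataInput in) throws IOException {
   *     instance.clear();               // drop entries from earlier reads
   *     int entries = in.readInt();
   *     // ... then read exactly 'entries' key/value pairs ...
   *   }
   *
   * Without the clear(), reusing one instance across many records would
   * accumulate stale keys -- exactly the HADOOP-2244 regression.
   */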
public void testEquality() {
MapWritable map1 = new MapWritable();
MapWritable map2 = new MapWritable();
MapWritable map3 = new MapWritable();
final IntWritable k1 = new IntWritable(5);
final IntWritable k2 = new IntWritable(10);
final Text value = new Text("value");
map1.put(k1, value); // equal
map2.put(k1, value); // equal
map3.put(k2, value); // not equal
assertTrue(map1.equals(map2));
assertTrue(map2.equals(map1));
assertFalse(map1.equals(map3));
assertEquals(map1.hashCode(), map2.hashCode());
assertFalse(map1.hashCode() == map3.hashCode());
}
  /** Verify toString() outputs a useful representation for MapWritable. */
public void testToString() {
MapWritable map = new MapWritable();
final IntWritable key = new IntWritable(5);
final Text value = new Text("value");
map.put(key, value);
assertEquals("{5=value}", map.toString());
}
}
| 5,486 | 32.87037 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestUTF8.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import junit.framework.TestCase;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.UTFDataFormatException;
import java.nio.ByteBuffer;
import java.util.Random;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
/** Unit tests for UTF8. */
@SuppressWarnings("deprecation")
public class TestUTF8 extends TestCase {
public TestUTF8(String name) { super(name); }
private static final Random RANDOM = new Random();
public static String getTestString() throws Exception {
StringBuilder buffer = new StringBuilder();
int length = RANDOM.nextInt(100);
for (int i = 0; i < length; i++) {
buffer.append((char)(RANDOM.nextInt(Character.MAX_VALUE)));
}
return buffer.toString();
}
public void testWritable() throws Exception {
for (int i = 0; i < 10000; i++) {
TestWritable.testWritable(new UTF8(getTestString()));
}
}
public void testGetBytes() throws Exception {
for (int i = 0; i < 10000; i++) {
// generate a random string
String before = getTestString();
// Check that the bytes are stored correctly in Modified-UTF8 format.
// Note that the DataInput and DataOutput interfaces convert between
// bytes and Strings using the Modified-UTF8 format.
assertEquals(before, readModifiedUTF(UTF8.getBytes(before)));
}
}
private String readModifiedUTF(byte[] bytes) throws IOException {
final short lengthBytes = (short)2;
ByteBuffer bb = ByteBuffer.allocate(bytes.length + lengthBytes);
bb.putShort((short)bytes.length).put(bytes);
ByteArrayInputStream bis = new ByteArrayInputStream(bb.array());
DataInputStream dis = new DataInputStream(bis);
return dis.readUTF();
}
public void testIO() throws Exception {
DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer();
for (int i = 0; i < 10000; i++) {
// generate a random string
String before = getTestString();
// write it
out.reset();
UTF8.writeString(out, before);
// test that it reads correctly
in.reset(out.getData(), out.getLength());
String after = UTF8.readString(in);
assertEquals(before, after);
// test that it reads correctly with DataInput
in.reset(out.getData(), out.getLength());
String after2 = in.readUTF();
assertEquals(before, after2);
}
}
public void testNullEncoding() throws Exception {
String s = new String(new char[] { 0 });
DataOutputBuffer dob = new DataOutputBuffer();
new UTF8(s).write(dob);
assertEquals(s, new String(dob.getData(), 2, dob.getLength()-2, "UTF-8"));
}
/**
* Test encoding and decoding of UTF8 outside the basic multilingual plane.
*
* This is a regression test for HADOOP-9103.
*/
public void testNonBasicMultilingualPlane() throws Exception {
// Test using the "CAT FACE" character (U+1F431)
// See http://www.fileformat.info/info/unicode/char/1f431/index.htm
String catFace = "\uD83D\uDC31";
// This encodes to 4 bytes in UTF-8:
byte[] encoded = catFace.getBytes("UTF-8");
assertEquals(4, encoded.length);
assertEquals("f09f90b1", StringUtils.byteToHexString(encoded));
// Decode back to String using our own decoder
String roundTrip = UTF8.fromBytes(encoded);
assertEquals(catFace, roundTrip);
}
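  /*
   * Worked encoding behind the assertions above: U+1F431 is 0x1F431 =
   * 0b000011111010000110001 (21 bits). Code points above U+FFFF take the
   * 4-byte UTF-8 form 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx; filling in the
   * bits as 000 / 011111 / 010000 / 110001 gives F0 9F 90 B1, the
   * "f09f90b1" checked above. On the Java side the same code point is the
   * surrogate pair 0xD83D/0xDC31: subtract 0x10000 and split the remaining
   * 20 bits into two 10-bit halves added to 0xD800 and 0xDC00.
   */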
/**
* Test that decoding invalid UTF8 throws an appropriate error message.
*/
public void testInvalidUTF8() throws Exception {
byte[] invalid = new byte[] {
0x01, 0x02, (byte)0xff, (byte)0xff, 0x01, 0x02, 0x03, 0x04, 0x05 };
try {
UTF8.fromBytes(invalid);
fail("did not throw an exception");
} catch (UTFDataFormatException utfde) {
GenericTestUtils.assertExceptionContains(
"Invalid UTF8 at ffff01020304", utfde);
}
}
/**
* Test for a 5-byte UTF8 sequence, which is now considered illegal.
*/
public void test5ByteUtf8Sequence() throws Exception {
byte[] invalid = new byte[] {
0x01, 0x02, (byte)0xf8, (byte)0x88, (byte)0x80,
(byte)0x80, (byte)0x80, 0x04, 0x05 };
try {
UTF8.fromBytes(invalid);
fail("did not throw an exception");
} catch (UTFDataFormatException utfde) {
GenericTestUtils.assertExceptionContains(
"Invalid UTF8 at f88880808004", utfde);
}
}
/**
* Test that decoding invalid UTF8 due to truncation yields the correct
* exception type.
*/
public void testInvalidUTF8Truncated() throws Exception {
// Truncated CAT FACE character -- this is a 4-byte sequence, but we
// only have the first three bytes.
byte[] truncated = new byte[] {
(byte)0xF0, (byte)0x9F, (byte)0x90 };
try {
UTF8.fromBytes(truncated);
fail("did not throw an exception");
} catch (UTFDataFormatException utfde) {
GenericTestUtils.assertExceptionContains(
"Truncated UTF8 at f09f90", utfde);
}
}
}
| 5,879 | 31.849162 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestTextNonUTF8.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import junit.framework.TestCase;
import java.nio.charset.MalformedInputException;
import java.util.Arrays;
/** Unit tests for Text handling of non-UTF8 bytes. */
public class TestTextNonUTF8 extends TestCase {
public void testNonUTF8() throws Exception{
// this is a non UTF8 byte array
byte b[] = {-0x01, -0x01, -0x01, -0x01, -0x01, -0x01, -0x01};
boolean nonUTF8 = false;
Text t = new Text(b);
    try {
      Text.validateUTF8(b);
    } catch (MalformedInputException me) {
      nonUTF8 = true;
    }
    // assert that the byte array was detected as non-UTF8
    assertTrue(nonUTF8);
byte ret[] = t.getBytes();
    // assert that the bytes are returned unchanged once the Text
    // object is created
assertTrue(Arrays.equals(b, ret));
}
public static void main(String[] args) throws Exception
{
TestTextNonUTF8 test = new TestTextNonUTF8();
test.testNonUTF8();
}
}
| 1,720 | 31.471698 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBloomMapFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import static org.mockito.Mockito.*;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.util.Progressable;
import org.junit.Assert;
public class TestBloomMapFile extends TestCase {
private static Configuration conf = new Configuration();
private static final Path TEST_ROOT = new Path(
System.getProperty("test.build.data", "/tmp"),
      TestBloomMapFile.class.getSimpleName());
private static final Path TEST_DIR = new Path(TEST_ROOT, "testfile");
private static final Path TEST_FILE = new Path(TEST_ROOT, "testfile");
@Override
public void setUp() throws Exception {
LocalFileSystem fs = FileSystem.getLocal(conf);
if (fs.exists(TEST_ROOT) && !fs.delete(TEST_ROOT, true)) {
Assert.fail("Can't clean up test root dir");
}
fs.mkdirs(TEST_ROOT);
}
@SuppressWarnings("deprecation")
public void testMembershipTest() throws Exception {
// write the file
FileSystem fs = FileSystem.getLocal(conf);
Path qualifiedDirName = fs.makeQualified(TEST_DIR);
conf.setInt("io.mapfile.bloom.size", 2048);
BloomMapFile.Writer writer = null;
BloomMapFile.Reader reader = null;
try {
writer = new BloomMapFile.Writer(conf, fs, qualifiedDirName.toString(),
IntWritable.class, Text.class);
IntWritable key = new IntWritable();
Text value = new Text();
for (int i = 0; i < 2000; i += 2) {
key.set(i);
value.set("00" + i);
writer.append(key, value);
}
writer.close();
reader = new BloomMapFile.Reader(fs, qualifiedDirName.toString(), conf);
// check false positives rate
int falsePos = 0;
int falseNeg = 0;
for (int i = 0; i < 2000; i++) {
key.set(i);
boolean exists = reader.probablyHasKey(key);
if (i % 2 == 0) {
if (!exists)
falseNeg++;
} else {
if (exists)
falsePos++;
}
}
reader.close();
fs.delete(qualifiedDirName, true);
System.out.println("False negatives: " + falseNeg);
assertEquals(0, falseNeg);
System.out.println("False positives: " + falsePos);
assertTrue(falsePos < 2);
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
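  /*
   * Why the assertions above are asymmetric: a Bloom filter may claim to
   * contain a key it never saw (false positive) but must never deny a key
   * it did see (false negative). So falseNeg has to be exactly 0, while
   * falsePos only needs a loose bound, tuned here via the
   * io.mapfile.bloom.size setting above.
   */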
@SuppressWarnings("deprecation")
private void checkMembershipVaryingSizedKeys(String name, List<Text> keys)
throws Exception {
FileSystem fs = FileSystem.getLocal(conf);
Path qualifiedDirName = fs.makeQualified(TEST_DIR);
BloomMapFile.Writer writer = null;
BloomMapFile.Reader reader = null;
try {
writer = new BloomMapFile.Writer(conf, fs, qualifiedDirName.toString(),
Text.class, NullWritable.class);
for (Text key : keys) {
writer.append(key, NullWritable.get());
}
writer.close();
// will check for membership in opposite order of how keys were inserted
reader = new BloomMapFile.Reader(fs, qualifiedDirName.toString(), conf);
Collections.reverse(keys);
for (Text key : keys) {
assertTrue("False negative for existing key " + key,
reader.probablyHasKey(key));
}
reader.close();
fs.delete(qualifiedDirName, true);
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
public void testMembershipVaryingSizedKeysTest1() throws Exception {
ArrayList<Text> list = new ArrayList<Text>();
list.add(new Text("A"));
list.add(new Text("BB"));
checkMembershipVaryingSizedKeys(getName(), list);
}
public void testMembershipVaryingSizedKeysTest2() throws Exception {
ArrayList<Text> list = new ArrayList<Text>();
list.add(new Text("AA"));
list.add(new Text("B"));
checkMembershipVaryingSizedKeys(getName(), list);
}
/**
* test {@code BloomMapFile.delete()} method
*/
public void testDeleteFile() {
BloomMapFile.Writer writer = null;
try {
FileSystem fs = FileSystem.getLocal(conf);
writer = new BloomMapFile.Writer(conf, TEST_FILE,
MapFile.Writer.keyClass(IntWritable.class),
MapFile.Writer.valueClass(Text.class));
assertNotNull("testDeleteFile error !!!", writer);
writer.close();
BloomMapFile.delete(fs, TEST_FILE.toString());
} catch (Exception ex) {
fail("unexpect ex in testDeleteFile !!!");
} finally {
IOUtils.cleanup(null, writer);
}
}
/**
   * test that the {@link BloomMapFile.Reader} constructor handles an
   * IOException from Path#getFileSystem gracefully
   */
  public void testIOExceptionInReaderConstructor() {
Path dirNameSpy = spy(TEST_FILE);
BloomMapFile.Reader reader = null;
BloomMapFile.Writer writer = null;
try {
writer = new BloomMapFile.Writer(conf, TEST_FILE,
MapFile.Writer.keyClass(IntWritable.class),
MapFile.Writer.valueClass(Text.class));
writer.append(new IntWritable(1), new Text("123124142"));
writer.close();
when(dirNameSpy.getFileSystem(conf)).thenThrow(new IOException());
reader = new BloomMapFile.Reader(dirNameSpy, conf,
MapFile.Reader.comparator(new WritableComparator(IntWritable.class)));
assertNull("testIOExceptionInWriterConstructor error !!!",
reader.getBloomFilter());
} catch (Exception ex) {
fail("unexpect ex in testIOExceptionInWriterConstructor !!!");
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
/**
   * test the {@link BloomMapFile.Reader#get} method
*/
public void testGetBloomMapFile() {
int SIZE = 10;
BloomMapFile.Reader reader = null;
BloomMapFile.Writer writer = null;
try {
writer = new BloomMapFile.Writer(conf, TEST_FILE,
MapFile.Writer.keyClass(IntWritable.class),
MapFile.Writer.valueClass(Text.class));
for (int i = 0; i < SIZE; i++) {
writer.append(new IntWritable(i), new Text());
}
writer.close();
reader = new BloomMapFile.Reader(TEST_FILE, conf,
MapFile.Reader.comparator(new WritableComparator(IntWritable.class)));
for (int i = 0; i < SIZE; i++) {
assertNotNull("testGetBloomMapFile error !!!",
reader.get(new IntWritable(i), new Text()));
}
assertNull("testGetBloomMapFile error !!!",
reader.get(new IntWritable(SIZE + 5), new Text()));
} catch (Exception ex) {
fail("unexpect ex in testGetBloomMapFile !!!");
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
/**
* test {@code BloomMapFile.Writer} constructors
*/
@SuppressWarnings("deprecation")
public void testBloomMapFileConstructors() {
BloomMapFile.Writer writer = null;
try {
FileSystem ts = FileSystem.get(conf);
String testFileName = TEST_FILE.toString();
writer = new BloomMapFile.Writer(conf, ts,
testFileName, IntWritable.class, Text.class, CompressionType.BLOCK,
defaultCodec, defaultProgress);
assertNotNull("testBloomMapFileConstructors error !!!", writer);
writer.close();
writer = new BloomMapFile.Writer(conf, ts,
testFileName, IntWritable.class, Text.class, CompressionType.BLOCK,
defaultProgress);
assertNotNull("testBloomMapFileConstructors error !!!", writer);
writer.close();
writer = new BloomMapFile.Writer(conf, ts,
testFileName, IntWritable.class, Text.class, CompressionType.BLOCK);
assertNotNull("testBloomMapFileConstructors error !!!", writer);
writer.close();
writer = new BloomMapFile.Writer(conf, ts,
testFileName, IntWritable.class, Text.class, CompressionType.RECORD,
defaultCodec, defaultProgress);
assertNotNull("testBloomMapFileConstructors error !!!", writer);
writer.close();
writer = new BloomMapFile.Writer(conf, ts,
testFileName, IntWritable.class, Text.class, CompressionType.RECORD,
defaultProgress);
assertNotNull("testBloomMapFileConstructors error !!!", writer);
writer.close();
writer = new BloomMapFile.Writer(conf, ts,
testFileName, IntWritable.class, Text.class, CompressionType.RECORD);
assertNotNull("testBloomMapFileConstructors error !!!", writer);
writer.close();
writer = new BloomMapFile.Writer(conf, ts,
testFileName, WritableComparator.get(Text.class), Text.class);
assertNotNull("testBloomMapFileConstructors error !!!", writer);
writer.close();
} catch (Exception ex) {
fail("testBloomMapFileConstructors error !!!");
} finally {
IOUtils.cleanup(null, writer);
}
}
static final Progressable defaultProgress = new Progressable() {
@Override
public void progress() {
}
};
static final CompressionCodec defaultCodec = new CompressionCodec() {
@Override
public String getDefaultExtension() {
return null;
}
@Override
public Class<? extends Decompressor> getDecompressorType() {
return null;
}
@Override
public Class<? extends Compressor> getCompressorType() {
return null;
}
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor) throws IOException {
return mock(CompressionOutputStream.class);
}
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return mock(CompressionOutputStream.class);
}
@Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor) throws IOException {
return null;
}
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return null;
}
@Override
public Decompressor createDecompressor() {
return null;
}
@Override
public Compressor createCompressor() {
return null;
}
};
}
| 11,418 | 32.684366 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBooleanWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestBooleanWritable {
@Test
public void testCompareUnequalWritables() throws Exception {
DataOutputBuffer bTrue = writeWritable(new BooleanWritable(true));
DataOutputBuffer bFalse = writeWritable(new BooleanWritable(false));
WritableComparator writableComparator =
WritableComparator.get(BooleanWritable.class);
assertEquals(0, compare(writableComparator, bTrue, bTrue));
assertEquals(0, compare(writableComparator, bFalse, bFalse));
assertEquals(1, compare(writableComparator, bTrue, bFalse));
assertEquals(-1, compare(writableComparator, bFalse, bTrue));
}
private int compare(WritableComparator writableComparator,
DataOutputBuffer buf1, DataOutputBuffer buf2) {
return writableComparator.compare(buf1.getData(), 0, buf1.size(),
buf2.getData(), 0, buf2.size());
}
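  // compare() above operates on the serialized bytes: WritableComparator
  // .get() resolves to the raw-byte comparator that BooleanWritable
  // registers for itself, so (as understood here -- the test does not
  // assert this) the orderings are established without deserializing
  // either operand.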
protected DataOutputBuffer writeWritable(Writable writable)
throws IOException {
DataOutputBuffer out = new DataOutputBuffer(1024);
writable.write(out);
out.flush();
return out;
}
/**
* test {@link BooleanWritable} methods hashCode(), equals(), compareTo()
*/
@Test
public void testCommonMethods() {
assertTrue("testCommonMethods1 error !!!", newInstance(true).equals(newInstance(true)));
assertTrue("testCommonMethods2 error !!!", newInstance(false).equals(newInstance(false)));
assertFalse("testCommonMethods3 error !!!", newInstance(false).equals(newInstance(true)));
assertTrue("testCommonMethods4 error !!!", checkHashCode(newInstance(true), newInstance(true)));
assertFalse("testCommonMethods5 error !!! ", checkHashCode(newInstance(true), newInstance(false)));
assertTrue("testCommonMethods6 error !!!", newInstance(true).compareTo(newInstance(false)) > 0 );
assertTrue("testCommonMethods7 error !!!", newInstance(false).compareTo(newInstance(true)) < 0 );
assertTrue("testCommonMethods8 error !!!", newInstance(false).compareTo(newInstance(false)) == 0 );
assertEquals("testCommonMethods9 error !!!", "true", newInstance(true).toString());
}
private boolean checkHashCode(BooleanWritable f, BooleanWritable s) {
return f.hashCode() == s.hashCode();
}
private static BooleanWritable newInstance(boolean flag) {
return new BooleanWritable(flag);
}
}
| 3,238 | 40.525641 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSecureIOUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestSecureIOUtils {
private static String realOwner, realGroup;
private static File testFilePathIs;
private static File testFilePathRaf;
private static File testFilePathFadis;
private static FileSystem fs;
@BeforeClass
public static void makeTestFile() throws Exception {
Configuration conf = new Configuration();
fs = FileSystem.getLocal(conf).getRaw();
testFilePathIs =
new File((new Path("target", TestSecureIOUtils.class.getSimpleName()
+ "1")).toUri().getRawPath());
testFilePathRaf =
new File((new Path("target", TestSecureIOUtils.class.getSimpleName()
+ "2")).toUri().getRawPath());
testFilePathFadis =
new File((new Path("target", TestSecureIOUtils.class.getSimpleName()
+ "3")).toUri().getRawPath());
for (File f : new File[] { testFilePathIs, testFilePathRaf,
testFilePathFadis }) {
FileOutputStream fos = new FileOutputStream(f);
fos.write("hello".getBytes("UTF-8"));
fos.close();
}
FileStatus stat = fs.getFileStatus(
new Path(testFilePathIs.toString()));
// RealOwner and RealGroup would be same for all three files.
realOwner = stat.getOwner();
realGroup = stat.getGroup();
}
@Test(timeout = 10000)
public void testReadUnrestricted() throws IOException {
SecureIOUtils.openForRead(testFilePathIs, null, null).close();
SecureIOUtils.openFSDataInputStream(testFilePathFadis, null, null).close();
SecureIOUtils.openForRandomRead(testFilePathRaf, "r", null, null).close();
}
@Test(timeout = 10000)
public void testReadCorrectlyRestrictedWithSecurity() throws IOException {
SecureIOUtils
.openForRead(testFilePathIs, realOwner, realGroup).close();
SecureIOUtils
.openFSDataInputStream(testFilePathFadis, realOwner, realGroup).close();
SecureIOUtils.openForRandomRead(testFilePathRaf, "r", realOwner, realGroup)
.close();
}
@Test(timeout = 10000)
public void testReadIncorrectlyRestrictedWithSecurity() throws IOException {
// this will only run if libs are available
assumeTrue(NativeIO.isAvailable());
System.out.println("Running test with native libs...");
String invalidUser = "InvalidUser";
// We need to make sure that forceSecure.. call works only if
// the file belongs to expectedOwner.
// InputStream
try {
SecureIOUtils
.forceSecureOpenForRead(testFilePathIs, invalidUser, realGroup)
.close();
fail("Didn't throw expection for wrong user ownership!");
} catch (IOException ioe) {
// expected
}
// FSDataInputStream
try {
SecureIOUtils
.forceSecureOpenFSDataInputStream(testFilePathFadis, invalidUser,
realGroup).close();
fail("Didn't throw expection for wrong user ownership!");
} catch (IOException ioe) {
// expected
}
// RandomAccessFile
try {
SecureIOUtils
.forceSecureOpenForRandomRead(testFilePathRaf, "r", invalidUser,
realGroup).close();
fail("Didn't throw expection for wrong user ownership!");
} catch (IOException ioe) {
// expected
}
}
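  /*
   * Reading of the force* variants used above (as understood here): the
   * plain open* methods fall back to a best-effort ownership check when
   * native code is unavailable, while the forceSecure* methods insist on
   * the native secure path -- hence the assumeTrue(NativeIO.isAvailable())
   * guard before exercising them.
   */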
@Test(timeout = 10000)
public void testCreateForWrite() throws IOException {
try {
SecureIOUtils.createForWrite(testFilePathIs, 0777);
fail("Was able to create file at " + testFilePathIs);
} catch (SecureIOUtils.AlreadyExistsException aee) {
// expected
}
}
@AfterClass
public static void removeTestFile() throws Exception {
// cleaning files
for (File f : new File[] { testFilePathIs, testFilePathRaf,
testFilePathFadis }) {
f.delete();
}
}
}
| 5,011 | 32.413333 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestDataByteBuffers.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Random;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestDataByteBuffers {
private static void readJunk(DataInput in, Random r, long seed, int iter)
throws IOException {
r.setSeed(seed);
for (int i = 0; i < iter; ++i) {
switch (r.nextInt(7)) {
case 0:
assertEquals((byte)(r.nextInt() & 0xFF), in.readByte()); break;
case 1:
assertEquals((short)(r.nextInt() & 0xFFFF), in.readShort()); break;
case 2:
assertEquals(r.nextInt(), in.readInt()); break;
case 3:
assertEquals(r.nextLong(), in.readLong()); break;
case 4:
assertEquals(Double.doubleToLongBits(r.nextDouble()),
Double.doubleToLongBits(in.readDouble())); break;
case 5:
assertEquals(Float.floatToIntBits(r.nextFloat()),
Float.floatToIntBits(in.readFloat())); break;
case 6:
int len = r.nextInt(1024);
byte[] vb = new byte[len];
r.nextBytes(vb);
byte[] b = new byte[len];
in.readFully(b, 0, len);
assertArrayEquals(vb, b);
break;
}
}
}
private static void writeJunk(DataOutput out, Random r, long seed, int iter)
throws IOException {
r.setSeed(seed);
for (int i = 0; i < iter; ++i) {
switch (r.nextInt(7)) {
case 0: out.writeByte(r.nextInt()); break;
case 1: out.writeShort((short)(r.nextInt() & 0xFFFF)); break;
case 2: out.writeInt(r.nextInt()); break;
case 3: out.writeLong(r.nextLong()); break;
case 4: out.writeDouble(r.nextDouble()); break;
case 5: out.writeFloat(r.nextFloat()); break;
case 6:
byte[] b = new byte[r.nextInt(1024)];
r.nextBytes(b);
out.write(b);
break;
}
}
}
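  // readJunk() and writeJunk() re-seed the shared Random identically, so
  // both walk the same sequence of (type, value) choices. The byte stream
  // carries no type tags at all; reader/writer agreement comes entirely
  // from replaying the seed, which is why every readJunk call below uses
  // the same seed and iteration count as the writeJunk call it checks.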
@Test
public void testBaseBuffers() throws IOException {
DataOutputBuffer dob = new DataOutputBuffer();
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
writeJunk(dob, r, seed, 1000);
DataInputBuffer dib = new DataInputBuffer();
dib.reset(dob.getData(), 0, dob.getLength());
readJunk(dib, r, seed, 1000);
dob.reset();
writeJunk(dob, r, seed, 1000);
dib.reset(dob.getData(), 0, dob.getLength());
readJunk(dib, r, seed, 1000);
}
@Test
public void testByteBuffers() throws IOException {
DataOutputByteBuffer dob = new DataOutputByteBuffer();
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
writeJunk(dob, r, seed, 1000);
DataInputByteBuffer dib = new DataInputByteBuffer();
dib.reset(dob.getData());
readJunk(dib, r, seed, 1000);
dob.reset();
writeJunk(dob, r, seed, 1000);
dib.reset(dob.getData());
readJunk(dib, r, seed, 1000);
}
private static byte[] toBytes(ByteBuffer[] bufs, int len) {
byte[] ret = new byte[len];
int pos = 0;
for (int i = 0; i < bufs.length; ++i) {
int rem = bufs[i].remaining();
bufs[i].get(ret, pos, rem);
pos += rem;
}
return ret;
}
@Test
public void testDataOutputByteBufferCompatibility() throws IOException {
DataOutputBuffer dob = new DataOutputBuffer();
DataOutputByteBuffer dobb = new DataOutputByteBuffer();
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
writeJunk(dob, r, seed, 1000);
writeJunk(dobb, r, seed, 1000);
byte[] check = toBytes(dobb.getData(), dobb.getLength());
assertEquals(check.length, dob.getLength());
assertArrayEquals(check, Arrays.copyOf(dob.getData(), dob.getLength()));
dob.reset();
dobb.reset();
writeJunk(dob, r, seed, 3000);
writeJunk(dobb, r, seed, 3000);
check = toBytes(dobb.getData(), dobb.getLength());
assertEquals(check.length, dob.getLength());
assertArrayEquals(check, Arrays.copyOf(dob.getData(), dob.getLength()));
dob.reset();
dobb.reset();
writeJunk(dob, r, seed, 1000);
writeJunk(dobb, r, seed, 1000);
check = toBytes(dobb.getData(), dobb.getLength());
assertEquals("Failed Checking length = " + check.length,
check.length, dob.getLength());
assertArrayEquals(check, Arrays.copyOf(dob.getData(), dob.getLength()));
}
@Test
public void TestDataInputByteBufferCompatibility() throws IOException {
DataOutputBuffer dob = new DataOutputBuffer();
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
writeJunk(dob, r, seed, 1000);
ByteBuffer buf = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
DataInputByteBuffer dib = new DataInputByteBuffer();
dib.reset(buf);
readJunk(dib, r, seed, 1000);
}
@Test
public void TestDataOutputByteBufferCompatibility() throws IOException {
DataOutputByteBuffer dob = new DataOutputByteBuffer();
Random r = new Random();
long seed = r.nextLong();
r.setSeed(seed);
System.out.println("SEED: " + seed);
writeJunk(dob, r, seed, 1000);
ByteBuffer buf = ByteBuffer.allocate(dob.getLength());
for (ByteBuffer b : dob.getData()) {
buf.put(b);
}
buf.flip();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(buf.array(), 0, buf.remaining());
readJunk(dib, r, seed, 1000);
}
}
| 6,454 | 31.766497 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritableName.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import junit.framework.TestCase;
/** Unit tests for WritableName. */
public class TestWritableName extends TestCase {
public TestWritableName(String name) {
super(name);
}
/** Example class used in test cases below. */
public static class SimpleWritable implements Writable {
private static final Random RANDOM = new Random();
int state = RANDOM.nextInt();
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(state);
}
@Override
public void readFields(DataInput in) throws IOException {
this.state = in.readInt();
}
public static SimpleWritable read(DataInput in) throws IOException {
SimpleWritable result = new SimpleWritable();
result.readFields(in);
return result;
}
/** Required by test code, below. */
@Override
public boolean equals(Object o) {
if (!(o instanceof SimpleWritable))
return false;
SimpleWritable other = (SimpleWritable)o;
return this.state == other.state;
}
}
private static final String testName = "mystring";
public void testGoodName() throws Exception {
Configuration conf = new Configuration();
Class<?> test = WritableName.getClass("long",conf);
assertTrue(test != null);
}
public void testSetName() throws Exception {
Configuration conf = new Configuration();
WritableName.setName(SimpleWritable.class, testName);
Class<?> test = WritableName.getClass(testName,conf);
assertTrue(test.equals(SimpleWritable.class));
}
public void testAddName() throws Exception {
Configuration conf = new Configuration();
String altName = testName + ".alt";
WritableName.setName(SimpleWritable.class, testName);
WritableName.addName(SimpleWritable.class, altName);
Class<?> test = WritableName.getClass(altName, conf);
assertTrue(test.equals(SimpleWritable.class));
// check original name still works
test = WritableName.getClass(testName, conf);
assertTrue(test.equals(SimpleWritable.class));
}
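  /*
   * Reading of the two tests above: setName() establishes the canonical
   * name, while addName() only adds a resolve-time alias -- which is why
   * both altName and testName map back to SimpleWritable. This aliasing is
   * the hook that keeps old data readable after a Writable class is
   * renamed.
   */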
public void testBadName() throws Exception {
Configuration conf = new Configuration();
try {
WritableName.getClass("unknown_junk",conf);
      fail("expected an IOException for an unknown class name");
} catch(IOException e) {
assertTrue(e.getMessage().matches(".*unknown_junk.*"));
}
}
}
| 3,331 | 28.75 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import java.util.*;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SequenceFile.Metadata;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.serializer.avro.AvroReflectSerialization;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.conf.*;
import org.mockito.Mockito;
/** Unit tests for SequenceFile. */
public class TestSequenceFile extends TestCase {
private static final Log LOG = LogFactory.getLog(TestSequenceFile.class);
private Configuration conf = new Configuration();
public TestSequenceFile() { }
public TestSequenceFile(String name) { super(name); }
  /** Test SequenceFile write/read/sort/merge with the zlib DefaultCodec. */
public void testZlibSequenceFile() throws Exception {
LOG.info("Testing SequenceFile with DefaultCodec");
compressedSeqFileTest(new DefaultCodec());
LOG.info("Successfully tested SequenceFile with DefaultCodec");
}
public void compressedSeqFileTest(CompressionCodec codec) throws Exception {
int count = 1024 * 10;
int megabytes = 1;
int factor = 5;
Path file = new Path(System.getProperty("test.build.data",".")+"/test.seq");
Path recordCompressedFile =
new Path(System.getProperty("test.build.data",".")+"/test.rc.seq");
Path blockCompressedFile =
new Path(System.getProperty("test.build.data",".")+"/test.bc.seq");
int seed = new Random().nextInt();
LOG.info("Seed = " + seed);
FileSystem fs = FileSystem.getLocal(conf);
try {
// SequenceFile.Writer
writeTest(fs, count, seed, file, CompressionType.NONE, null);
readTest(fs, count, seed, file);
sortTest(fs, count, megabytes, factor, false, file);
checkSort(fs, count, seed, file);
sortTest(fs, count, megabytes, factor, true, file);
checkSort(fs, count, seed, file);
mergeTest(fs, count, seed, file, CompressionType.NONE, false,
factor, megabytes);
checkSort(fs, count, seed, file);
mergeTest(fs, count, seed, file, CompressionType.NONE, true,
factor, megabytes);
checkSort(fs, count, seed, file);
// SequenceFile.RecordCompressWriter
writeTest(fs, count, seed, recordCompressedFile, CompressionType.RECORD,
codec);
readTest(fs, count, seed, recordCompressedFile);
sortTest(fs, count, megabytes, factor, false, recordCompressedFile);
checkSort(fs, count, seed, recordCompressedFile);
sortTest(fs, count, megabytes, factor, true, recordCompressedFile);
checkSort(fs, count, seed, recordCompressedFile);
mergeTest(fs, count, seed, recordCompressedFile,
CompressionType.RECORD, false, factor, megabytes);
checkSort(fs, count, seed, recordCompressedFile);
mergeTest(fs, count, seed, recordCompressedFile,
CompressionType.RECORD, true, factor, megabytes);
checkSort(fs, count, seed, recordCompressedFile);
// SequenceFile.BlockCompressWriter
writeTest(fs, count, seed, blockCompressedFile, CompressionType.BLOCK,
codec);
readTest(fs, count, seed, blockCompressedFile);
sortTest(fs, count, megabytes, factor, false, blockCompressedFile);
checkSort(fs, count, seed, blockCompressedFile);
sortTest(fs, count, megabytes, factor, true, blockCompressedFile);
checkSort(fs, count, seed, blockCompressedFile);
mergeTest(fs, count, seed, blockCompressedFile, CompressionType.BLOCK,
false, factor, megabytes);
checkSort(fs, count, seed, blockCompressedFile);
mergeTest(fs, count, seed, blockCompressedFile, CompressionType.BLOCK,
true, factor, megabytes);
checkSort(fs, count, seed, blockCompressedFile);
} finally {
fs.close();
}
}
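  /*
   * Minimal write/read round-trip sketch (illustrative only, not part of the
   * original suite): the single-record version of what writeTest/readTest
   * below do in bulk. The file name is hypothetical.
   */
  public void testRoundTripSketch() throws IOException {
    Path p = new Path(System.getProperty("test.build.data", ".") + "/sketch.seq");
    FileSystem fs = FileSystem.getLocal(conf);
    fs.delete(p, true);
    SequenceFile.Writer writer =
      SequenceFile.createWriter(fs, conf, p, Text.class, IntWritable.class);
    try {
      writer.append(new Text("answer"), new IntWritable(42));
    } finally {
      writer.close();
    }
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, conf);
    try {
      Text k = new Text();
      IntWritable v = new IntWritable();
      assertTrue(reader.next(k, v));           // the one record appended above
      assertEquals("answer", k.toString());
      assertEquals(42, v.get());
      assertFalse(reader.next(k, v));          // end of file
    } finally {
      reader.close();
    }
  }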
private void writeTest(FileSystem fs, int count, int seed, Path file,
CompressionType compressionType, CompressionCodec codec)
throws IOException {
fs.delete(file, true);
LOG.info("creating " + count + " records with " + compressionType +
" compression");
SequenceFile.Writer writer =
SequenceFile.createWriter(fs, conf, file,
RandomDatum.class, RandomDatum.class, compressionType, codec);
RandomDatum.Generator generator = new RandomDatum.Generator(seed);
for (int i = 0; i < count; i++) {
generator.next();
RandomDatum key = generator.getKey();
RandomDatum value = generator.getValue();
writer.append(key, value);
}
writer.close();
}
private void readTest(FileSystem fs, int count, int seed, Path file)
throws IOException {
LOG.debug("reading " + count + " records");
SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
RandomDatum.Generator generator = new RandomDatum.Generator(seed);
RandomDatum k = new RandomDatum();
RandomDatum v = new RandomDatum();
DataOutputBuffer rawKey = new DataOutputBuffer();
SequenceFile.ValueBytes rawValue = reader.createValueBytes();
for (int i = 0; i < count; i++) {
generator.next();
RandomDatum key = generator.getKey();
RandomDatum value = generator.getValue();
try {
if ((i%5) == 0) {
// Testing 'raw' apis
rawKey.reset();
reader.nextRaw(rawKey, rawValue);
} else {
// Testing 'non-raw' apis
if ((i%2) == 0) {
reader.next(k);
reader.getCurrentValue(v);
} else {
reader.next(k, v);
}
// Check
if (!k.equals(key))
throw new RuntimeException("wrong key at " + i);
if (!v.equals(value))
throw new RuntimeException("wrong value at " + i);
}
} catch (IOException ioe) {
LOG.info("Problem on row " + i);
LOG.info("Expected key = " + key);
LOG.info("Expected len = " + key.getLength());
LOG.info("Actual key = " + k);
LOG.info("Actual len = " + k.getLength());
LOG.info("Expected value = " + value);
LOG.info("Expected len = " + value.getLength());
LOG.info("Actual value = " + v);
LOG.info("Actual len = " + v.getLength());
LOG.info("Key equals: " + k.equals(key));
LOG.info("value equals: " + v.equals(value));
throw ioe;
}
}
reader.close();
}
private void sortTest(FileSystem fs, int count, int megabytes,
int factor, boolean fast, Path file)
throws IOException {
fs.delete(new Path(file+".sorted"), true);
SequenceFile.Sorter sorter = newSorter(fs, fast, megabytes, factor);
LOG.debug("sorting " + count + " records");
sorter.sort(file, file.suffix(".sorted"));
LOG.info("done sorting " + count + " debug");
}
private void checkSort(FileSystem fs, int count, int seed, Path file)
throws IOException {
LOG.info("sorting " + count + " records in memory for debug");
RandomDatum.Generator generator = new RandomDatum.Generator(seed);
SortedMap<RandomDatum, RandomDatum> map =
new TreeMap<RandomDatum, RandomDatum>();
for (int i = 0; i < count; i++) {
generator.next();
RandomDatum key = generator.getKey();
RandomDatum value = generator.getValue();
map.put(key, value);
}
LOG.debug("checking order of " + count + " records");
RandomDatum k = new RandomDatum();
RandomDatum v = new RandomDatum();
Iterator<Map.Entry<RandomDatum, RandomDatum>> iterator =
map.entrySet().iterator();
SequenceFile.Reader reader =
new SequenceFile.Reader(fs, file.suffix(".sorted"), conf);
for (int i = 0; i < count; i++) {
Map.Entry<RandomDatum, RandomDatum> entry = iterator.next();
RandomDatum key = entry.getKey();
RandomDatum value = entry.getValue();
reader.next(k, v);
if (!k.equals(key))
throw new RuntimeException("wrong key at " + i);
if (!v.equals(value))
throw new RuntimeException("wrong value at " + i);
}
reader.close();
LOG.debug("sucessfully checked " + count + " records");
}
private void mergeTest(FileSystem fs, int count, int seed, Path file,
CompressionType compressionType,
boolean fast, int factor, int megabytes)
throws IOException {
LOG.debug("creating "+factor+" files with "+count/factor+" records");
SequenceFile.Writer[] writers = new SequenceFile.Writer[factor];
Path[] names = new Path[factor];
Path[] sortedNames = new Path[factor];
for (int i = 0; i < factor; i++) {
names[i] = file.suffix("."+i);
sortedNames[i] = names[i].suffix(".sorted");
fs.delete(names[i], true);
fs.delete(sortedNames[i], true);
writers[i] = SequenceFile.createWriter(fs, conf, names[i],
RandomDatum.class, RandomDatum.class, compressionType);
}
RandomDatum.Generator generator = new RandomDatum.Generator(seed);
for (int i = 0; i < count; i++) {
generator.next();
RandomDatum key = generator.getKey();
RandomDatum value = generator.getValue();
writers[i%factor].append(key, value);
}
for (int i = 0; i < factor; i++)
writers[i].close();
for (int i = 0; i < factor; i++) {
LOG.debug("sorting file " + i + " with " + count/factor + " records");
newSorter(fs, fast, megabytes, factor).sort(names[i], sortedNames[i]);
}
LOG.info("merging " + factor + " files with " + count/factor + " debug");
fs.delete(new Path(file+".sorted"), true);
newSorter(fs, fast, megabytes, factor)
.merge(sortedNames, file.suffix(".sorted"));
}
private SequenceFile.Sorter newSorter(FileSystem fs,
boolean fast,
int megabytes, int factor) {
SequenceFile.Sorter sorter =
fast
? new SequenceFile.Sorter(fs, new RandomDatum.Comparator(),
RandomDatum.class, RandomDatum.class, conf)
: new SequenceFile.Sorter(fs, RandomDatum.class, RandomDatum.class, conf);
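    // memory caps the in-memory sort buffer; factor caps how many sorted
    // runs a single merge pass combines.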
sorter.setMemory(megabytes * 1024*1024);
sorter.setFactor(factor);
return sorter;
}
/** Unit tests for SequenceFile metadata. */
public void testSequenceFileMetadata() throws Exception {
LOG.info("Testing SequenceFile with metadata");
int count = 1024 * 10;
CompressionCodec codec = new DefaultCodec();
Path file = new Path(System.getProperty("test.build.data",".")+"/test.seq.metadata");
Path sortedFile =
new Path(System.getProperty("test.build.data",".")+"/test.sorted.seq.metadata");
Path recordCompressedFile =
new Path(System.getProperty("test.build.data",".")+"/test.rc.seq.metadata");
Path blockCompressedFile =
new Path(System.getProperty("test.build.data",".")+"/test.bc.seq.metadata");
FileSystem fs = FileSystem.getLocal(conf);
SequenceFile.Metadata theMetadata = new SequenceFile.Metadata();
theMetadata.set(new Text("name_1"), new Text("value_1"));
theMetadata.set(new Text("name_2"), new Text("value_2"));
theMetadata.set(new Text("name_3"), new Text("value_3"));
theMetadata.set(new Text("name_4"), new Text("value_4"));
int seed = new Random().nextInt();
try {
// SequenceFile.Writer
writeMetadataTest(fs, count, seed, file, CompressionType.NONE, null, theMetadata);
SequenceFile.Metadata aMetadata = readMetadata(fs, file);
if (!theMetadata.equals(aMetadata)) {
LOG.info("The original metadata:\n" + theMetadata.toString());
LOG.info("The retrieved metadata:\n" + aMetadata.toString());
throw new RuntimeException("metadata not match: " + 1);
}
// SequenceFile.RecordCompressWriter
writeMetadataTest(fs, count, seed, recordCompressedFile, CompressionType.RECORD,
codec, theMetadata);
aMetadata = readMetadata(fs, recordCompressedFile);
if (!theMetadata.equals(aMetadata)) {
LOG.info("The original metadata:\n" + theMetadata.toString());
LOG.info("The retrieved metadata:\n" + aMetadata.toString());
throw new RuntimeException("metadata not match: " + 2);
}
// SequenceFile.BlockCompressWriter
writeMetadataTest(fs, count, seed, blockCompressedFile, CompressionType.BLOCK,
codec, theMetadata);
      aMetadata = readMetadata(fs, blockCompressedFile);
if (!theMetadata.equals(aMetadata)) {
LOG.info("The original metadata:\n" + theMetadata.toString());
LOG.info("The retrieved metadata:\n" + aMetadata.toString());
throw new RuntimeException("metadata not match: " + 3);
}
// SequenceFile.Sorter
sortMetadataTest(fs, file, sortedFile, theMetadata);
      aMetadata = readMetadata(fs, sortedFile);
if (!theMetadata.equals(aMetadata)) {
LOG.info("The original metadata:\n" + theMetadata.toString());
LOG.info("The retrieved metadata:\n" + aMetadata.toString());
throw new RuntimeException("metadata not match: " + 4);
}
} finally {
fs.close();
}
LOG.info("Successfully tested SequenceFile with metadata");
}
private SequenceFile.Metadata readMetadata(FileSystem fs, Path file)
throws IOException {
LOG.info("reading file: " + file.toString());
SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
SequenceFile.Metadata meta = reader.getMetadata();
reader.close();
return meta;
}
private void writeMetadataTest(FileSystem fs, int count, int seed, Path file,
CompressionType compressionType, CompressionCodec codec, SequenceFile.Metadata metadata)
throws IOException {
fs.delete(file, true);
LOG.info("creating " + count + " records with metadata and with " + compressionType +
" compression");
SequenceFile.Writer writer =
SequenceFile.createWriter(fs, conf, file,
RandomDatum.class, RandomDatum.class, compressionType, codec, null, metadata);
RandomDatum.Generator generator = new RandomDatum.Generator(seed);
for (int i = 0; i < count; i++) {
generator.next();
RandomDatum key = generator.getKey();
RandomDatum value = generator.getValue();
writer.append(key, value);
}
writer.close();
}
private void sortMetadataTest(FileSystem fs, Path unsortedFile, Path sortedFile, SequenceFile.Metadata metadata)
throws IOException {
fs.delete(sortedFile, true);
LOG.info("sorting: " + unsortedFile + " to: " + sortedFile);
final WritableComparator comparator = WritableComparator.get(RandomDatum.class);
SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs, comparator, RandomDatum.class, RandomDatum.class, conf, metadata);
sorter.sort(new Path[] { unsortedFile }, sortedFile, false);
}
public void testClose() throws IOException {
Configuration conf = new Configuration();
LocalFileSystem fs = FileSystem.getLocal(conf);
// create a sequence file 1
Path path1 = new Path(System.getProperty("test.build.data",".")+"/test1.seq");
SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path1,
Text.class, NullWritable.class, CompressionType.BLOCK);
writer.append(new Text("file1-1"), NullWritable.get());
writer.append(new Text("file1-2"), NullWritable.get());
writer.close();
Path path2 = new Path(System.getProperty("test.build.data",".")+"/test2.seq");
writer = SequenceFile.createWriter(fs, conf, path2, Text.class,
NullWritable.class, CompressionType.BLOCK);
writer.append(new Text("file2-1"), NullWritable.get());
writer.append(new Text("file2-2"), NullWritable.get());
writer.close();
// Create a reader which uses 4 BuiltInZLibInflater instances
SequenceFile.Reader reader = new SequenceFile.Reader(fs, path1, conf);
// Returns the 4 BuiltInZLibInflater instances to the CodecPool
reader.close();
    // The second close _could_ erroneously return the same
// 4 BuiltInZLibInflater instances to the CodecPool again
reader.close();
// The first reader gets 4 BuiltInZLibInflater instances from the CodecPool
SequenceFile.Reader reader1 = new SequenceFile.Reader(fs, path1, conf);
// read first value from reader1
Text text = new Text();
reader1.next(text);
assertEquals("file1-1", text.toString());
// The second reader _could_ get the same 4 BuiltInZLibInflater
// instances from the CodePool as reader1
SequenceFile.Reader reader2 = new SequenceFile.Reader(fs, path2, conf);
// read first value from reader2
reader2.next(text);
assertEquals("file2-1", text.toString());
// read second value from reader1
reader1.next(text);
assertEquals("file1-2", text.toString());
    // read second value from reader2 (fails if the readers wrongly share decompressors)
reader2.next(text);
assertEquals("file2-2", text.toString());
assertFalse(reader1.next(text));
assertFalse(reader2.next(text));
}
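  /*
   * Illustrative sketch of the CodecPool contract that testClose() above
   * guards (not part of the original suite): a decompressor leased from the
   * pool must be returned exactly once, or two readers can end up sharing
   * one instance.
   */
  public void testCodecPoolContractSketch() {
    DefaultCodec codec = new DefaultCodec();
    codec.setConf(conf);
    org.apache.hadoop.io.compress.Decompressor decompressor =
      org.apache.hadoop.io.compress.CodecPool.getDecompressor(codec);
    assertNotNull(decompressor);
    // Returning the same instance a second time would re-pool it, which is
    // the double-close scenario described above.
    org.apache.hadoop.io.compress.CodecPool.returnDecompressor(decompressor);
  }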
/**
   * Test that makes sure the FileSystem passed to createWriter is the one used.
* @throws Exception
*/
public void testCreateUsesFsArg() throws Exception {
FileSystem fs = FileSystem.getLocal(conf);
FileSystem spyFs = Mockito.spy(fs);
Path p = new Path(System.getProperty("test.build.data", ".")+"/testCreateUsesFSArg.seq");
SequenceFile.Writer writer = SequenceFile.createWriter(
spyFs, conf, p, NullWritable.class, NullWritable.class);
writer.close();
Mockito.verify(spyFs).getDefaultReplication(p);
}
private static class TestFSDataInputStream extends FSDataInputStream {
private boolean closed = false;
private TestFSDataInputStream(InputStream in) throws IOException {
super(in);
}
@Override
public void close() throws IOException {
closed = true;
super.close();
}
public boolean isClosed() {
return closed;
}
}
public void testCloseForErroneousSequenceFile()
throws IOException {
Configuration conf = new Configuration();
LocalFileSystem fs = FileSystem.getLocal(conf);
// create an empty file (which is not a valid sequence file)
Path path = new Path(System.getProperty("test.build.data",".")+"/broken.seq");
fs.create(path).close();
// try to create SequenceFile.Reader
final TestFSDataInputStream[] openedFile = new TestFSDataInputStream[1];
try {
new SequenceFile.Reader(fs, path, conf) {
        // this method is called by the SequenceFile.Reader constructor; overridden so we can access the opened file
@Override
protected FSDataInputStream openFile(FileSystem fs, Path file, int bufferSize, long length) throws IOException {
final InputStream in = super.openFile(fs, file, bufferSize, length);
openedFile[0] = new TestFSDataInputStream(in);
return openedFile[0];
}
};
fail("IOException expected.");
} catch (IOException expected) {}
assertNotNull(path + " should have been opened.", openedFile[0]);
assertTrue("InputStream for " + path + " should have been closed.", openedFile[0].isClosed());
}
/**
* Test that makes sure createWriter succeeds on a file that was
* already created
* @throws IOException
*/
public void testCreateWriterOnExistingFile() throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
Path name = new Path(new Path(System.getProperty("test.build.data","."),
"createWriterOnExistingFile") , "file");
    fs.create(name).close();
SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
RandomDatum.class, 512, (short) 1, 4096, false,
CompressionType.NONE, null, new Metadata());
}
public void testRecursiveSeqFileCreate() throws IOException {
FileSystem fs = FileSystem.getLocal(conf);
Path name = new Path(new Path(System.getProperty("test.build.data","."),
"recursiveCreateDir") , "file");
boolean createParent = false;
try {
SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
RandomDatum.class, 512, (short) 1, 4096, createParent,
CompressionType.NONE, null, new Metadata());
fail("Expected an IOException due to missing parent");
} catch (IOException ioe) {
// Expected
}
createParent = true;
SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
RandomDatum.class, 512, (short) 1, 4096, createParent,
CompressionType.NONE, null, new Metadata());
// should succeed, fails if exception thrown
}
public void testSerializationAvailability() throws IOException {
Configuration conf = new Configuration();
Path path = new Path(System.getProperty("test.build.data", "."),
"serializationAvailability");
// Check if any serializers aren't found.
try {
SequenceFile.createWriter(
conf,
SequenceFile.Writer.file(path),
SequenceFile.Writer.keyClass(String.class),
SequenceFile.Writer.valueClass(NullWritable.class));
// Note: This may also fail someday if JavaSerialization
// is activated by default.
fail("Must throw IOException for missing serializer for the Key class");
} catch (IOException e) {
assertTrue(e.getMessage().startsWith(
"Could not find a serializer for the Key class: '" +
String.class.getName() + "'."));
}
try {
SequenceFile.createWriter(
conf,
SequenceFile.Writer.file(path),
SequenceFile.Writer.keyClass(NullWritable.class),
SequenceFile.Writer.valueClass(String.class));
// Note: This may also fail someday if JavaSerialization
// is activated by default.
fail("Must throw IOException for missing serializer for the Value class");
} catch (IOException e) {
assertTrue(e.getMessage().startsWith(
"Could not find a serializer for the Value class: '" +
String.class.getName() + "'."));
}
// Write a simple file to test deserialization failures with
writeTest(FileSystem.get(conf), 1, 1, path, CompressionType.NONE, null);
// Remove Writable serializations, to enforce error.
conf.setStrings(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
AvroReflectSerialization.class.getName());
// Now check if any deserializers aren't found.
try {
new SequenceFile.Reader(
conf,
SequenceFile.Reader.file(path));
fail("Must throw IOException for missing deserializer for the Key class");
} catch (IOException e) {
assertTrue(e.getMessage().startsWith(
"Could not find a deserializer for the Key class: '" +
RandomDatum.class.getName() + "'."));
}
}
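  /*
   * Sketch (not part of the original suite) of the lookup the test above
   * exercises: SerializationFactory walks the classes configured under
   * io.serializations and returns null when none of them accepts the class.
   * Assumes the default serialization configuration.
   */
  public void testSerializationLookupSketch() {
    org.apache.hadoop.io.serializer.SerializationFactory factory =
      new org.apache.hadoop.io.serializer.SerializationFactory(new Configuration());
    // Text is a Writable, so WritableSerialization accepts it.
    assertNotNull(factory.getSerialization(Text.class));
    // Nothing accepts String unless JavaSerialization is configured.
    assertNull(factory.getSerialization(String.class));
  }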
/** For debugging and testing. */
public static void main(String[] args) throws Exception {
int count = 1024 * 1024;
int megabytes = 1;
int factor = 10;
boolean create = true;
boolean rwonly = false;
boolean check = false;
boolean fast = false;
boolean merge = false;
String compressType = "NONE";
String compressionCodec = "org.apache.hadoop.io.compress.DefaultCodec";
Path file = null;
int seed = new Random().nextInt();
String usage = "Usage: SequenceFile " +
"[-count N] " +
"[-seed #] [-check] [-compressType <NONE|RECORD|BLOCK>] " +
"-codec <compressionCodec> " +
"[[-rwonly] | {[-megabytes M] [-factor F] [-nocreate] [-fast] [-merge]}] " +
" file";
if (args.length == 0) {
System.err.println(usage);
System.exit(-1);
}
FileSystem fs = null;
try {
for (int i=0; i < args.length; ++i) { // parse command line
if (args[i] == null) {
continue;
} else if (args[i].equals("-count")) {
count = Integer.parseInt(args[++i]);
} else if (args[i].equals("-megabytes")) {
megabytes = Integer.parseInt(args[++i]);
} else if (args[i].equals("-factor")) {
factor = Integer.parseInt(args[++i]);
} else if (args[i].equals("-seed")) {
seed = Integer.parseInt(args[++i]);
} else if (args[i].equals("-rwonly")) {
rwonly = true;
} else if (args[i].equals("-nocreate")) {
create = false;
} else if (args[i].equals("-check")) {
check = true;
} else if (args[i].equals("-fast")) {
fast = true;
} else if (args[i].equals("-merge")) {
merge = true;
} else if (args[i].equals("-compressType")) {
compressType = args[++i];
} else if (args[i].equals("-codec")) {
compressionCodec = args[++i];
} else {
// file is required parameter
file = new Path(args[i]);
}
}
TestSequenceFile test = new TestSequenceFile();
fs = file.getFileSystem(test.conf);
LOG.info("count = " + count);
LOG.info("megabytes = " + megabytes);
LOG.info("factor = " + factor);
LOG.info("create = " + create);
LOG.info("seed = " + seed);
LOG.info("rwonly = " + rwonly);
LOG.info("check = " + check);
LOG.info("fast = " + fast);
LOG.info("merge = " + merge);
LOG.info("compressType = " + compressType);
LOG.info("compressionCodec = " + compressionCodec);
LOG.info("file = " + file);
if (rwonly && (!create || merge || fast)) {
System.err.println(usage);
System.exit(-1);
}
CompressionType compressionType =
CompressionType.valueOf(compressType);
CompressionCodec codec = (CompressionCodec)ReflectionUtils.newInstance(
test.conf.getClassByName(compressionCodec),
test.conf);
if (rwonly || (create && !merge)) {
test.writeTest(fs, count, seed, file, compressionType, codec);
test.readTest(fs, count, seed, file);
}
if (!rwonly) {
if (merge) {
test.mergeTest(fs, count, seed, file, compressionType,
fast, factor, megabytes);
} else {
test.sortTest(fs, count, megabytes, factor, fast, file);
}
}
if (check) {
test.checkSort(fs, count, seed, file);
}
} finally {
fs.close();
}
}
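  // Example standalone invocation (hypothetical paths and values), driving
  // the same write/sort/check pipeline as the unit tests:
  //
  //   java org.apache.hadoop.io.TestSequenceFile -count 10000 -seed 42 \
  //       -compressType BLOCK -codec org.apache.hadoop.io.compress.DefaultCodec \
  //       -check /tmp/bench.seq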
}
| 27,783 | 37.217331 | 128 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestEnumSetWritable.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import java.util.EnumSet;
import java.util.Iterator;
import java.lang.reflect.Type;
import junit.framework.TestCase;
/** Unit test for EnumSetWritable */
public class TestEnumSetWritable extends TestCase {
enum TestEnumSet {
CREATE, OVERWRITE, APPEND;
}
EnumSet<TestEnumSet> nonEmptyFlag = EnumSet.of(TestEnumSet.APPEND);
EnumSetWritable<TestEnumSet> nonEmptyFlagWritable =
new EnumSetWritable<TestEnumSet>(nonEmptyFlag);
@SuppressWarnings("unchecked")
public void testSerializeAndDeserializeNonEmpty() throws IOException {
DataOutputBuffer out = new DataOutputBuffer();
ObjectWritable.writeObject(out, nonEmptyFlagWritable, nonEmptyFlagWritable
.getClass(), null);
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
EnumSet<TestEnumSet> read = ((EnumSetWritable<TestEnumSet>) ObjectWritable
.readObject(in, null)).get();
    assertEquals(nonEmptyFlag, read);
}
EnumSet<TestEnumSet> emptyFlag = EnumSet.noneOf(TestEnumSet.class);
@SuppressWarnings("unchecked")
public void testSerializeAndDeserializeEmpty() throws IOException {
boolean gotException = false;
try {
new EnumSetWritable<TestEnumSet>(emptyFlag);
} catch (RuntimeException e) {
gotException = true;
}
assertTrue(
"Instantiation of empty EnumSetWritable with no element type class "
+ "provided should throw exception.",
gotException);
EnumSetWritable<TestEnumSet> emptyFlagWritable =
new EnumSetWritable<TestEnumSet>(emptyFlag, TestEnumSet.class);
DataOutputBuffer out = new DataOutputBuffer();
ObjectWritable.writeObject(out, emptyFlagWritable, emptyFlagWritable
.getClass(), null);
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
EnumSet<TestEnumSet> read = ((EnumSetWritable<TestEnumSet>) ObjectWritable
.readObject(in, null)).get();
    assertEquals(emptyFlag, read);
}
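  // Note: an empty (or null) EnumSet carries no element from which the enum
  // type could be inferred, which is why the constructors exercised above
  // and below require the explicit TestEnumSet.class argument.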
@SuppressWarnings("unchecked")
public void testSerializeAndDeserializeNull() throws IOException {
boolean gotException = false;
try {
new EnumSetWritable<TestEnumSet>(null);
} catch (RuntimeException e) {
gotException = true;
}
assertTrue(
"Instantiation of empty EnumSetWritable with no element type class "
+ "provided should throw exception",
gotException);
EnumSetWritable<TestEnumSet> nullFlagWritable =
new EnumSetWritable<TestEnumSet>(null, TestEnumSet.class);
DataOutputBuffer out = new DataOutputBuffer();
ObjectWritable.writeObject(out, nullFlagWritable, nullFlagWritable
.getClass(), null);
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
EnumSet<TestEnumSet> read = ((EnumSetWritable<TestEnumSet>) ObjectWritable
.readObject(in, null)).get();
    assertNull(read);
}
public EnumSetWritable<TestEnumSet> testField;
public void testAvroReflect() throws Exception {
String schema = "{\"type\":\"array\",\"items\":{\"type\":\"enum\","
+ "\"name\":\"TestEnumSet\","
+ "\"namespace\":\"org.apache.hadoop.io.TestEnumSetWritable$\","
+ "\"symbols\":[\"CREATE\",\"OVERWRITE\",\"APPEND\"]},"
+ "\"java-class\":\"org.apache.hadoop.io.EnumSetWritable\"}";
Type type =
TestEnumSetWritable.class.getField("testField").getGenericType();
AvroTestUtil.testReflect(nonEmptyFlagWritable, type, schema);
}
/**
* test {@link EnumSetWritable} equals() method
*/
public void testEnumSetWritableEquals() {
EnumSetWritable<TestEnumSet> eset1 = new EnumSetWritable<TestEnumSet>(
EnumSet.of(TestEnumSet.APPEND, TestEnumSet.CREATE), TestEnumSet.class);
EnumSetWritable<TestEnumSet> eset2 = new EnumSetWritable<TestEnumSet>(
EnumSet.of(TestEnumSet.APPEND, TestEnumSet.CREATE), TestEnumSet.class);
assertTrue("testEnumSetWritableEquals error !!!", eset1.equals(eset2));
assertFalse("testEnumSetWritableEquals error !!!",
eset1.equals(new EnumSetWritable<TestEnumSet>(EnumSet.of(
TestEnumSet.APPEND, TestEnumSet.CREATE, TestEnumSet.OVERWRITE),
TestEnumSet.class)));
assertTrue("testEnumSetWritableEquals getElementType error !!!", eset1
.getElementType().equals(TestEnumSet.class));
}
/**
* test {@code EnumSetWritable.write(DataOutputBuffer out)}
* and iteration by TestEnumSet through iterator().
*/
public void testEnumSetWritableWriteRead() throws Exception {
EnumSetWritable<TestEnumSet> srcSet = new EnumSetWritable<TestEnumSet>(
EnumSet.of(TestEnumSet.APPEND, TestEnumSet.CREATE), TestEnumSet.class);
DataOutputBuffer out = new DataOutputBuffer();
srcSet.write(out);
EnumSetWritable<TestEnumSet> dstSet = new EnumSetWritable<TestEnumSet>();
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
dstSet.readFields(in);
EnumSet<TestEnumSet> result = dstSet.get();
Iterator<TestEnumSet> dstIter = result.iterator();
Iterator<TestEnumSet> srcIter = srcSet.iterator();
while (dstIter.hasNext() && srcIter.hasNext()) {
assertEquals("testEnumSetWritableWriteRead error !!!", dstIter.next(),
srcIter.next());
}
}
}
| 6,197 | 37.259259 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMD5Hash.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import org.apache.hadoop.io.TestWritable;
import junit.framework.TestCase;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.security.MessageDigest;
import java.util.Random;
/** Unit tests for MD5Hash. */
public class TestMD5Hash extends TestCase {
public TestMD5Hash(String name) { super(name); }
private static final Random RANDOM = new Random();
public static MD5Hash getTestHash() throws Exception {
MessageDigest digest = MessageDigest.getInstance("MD5");
byte[] buffer = new byte[1024];
RANDOM.nextBytes(buffer);
digest.update(buffer);
return new MD5Hash(digest.digest());
}
protected static byte[] D00 = new byte[] {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
protected static byte[] DFF = new byte[] {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
public void testMD5Hash() throws Exception {
MD5Hash md5Hash = getTestHash();
final MD5Hash md5Hash00
= new MD5Hash(D00);
final MD5Hash md5HashFF
= new MD5Hash(DFF);
MD5Hash orderedHash = new MD5Hash(new byte[]{1,2,3,4,5,6,7,8,9,10,11,12,
13,14,15,16});
MD5Hash backwardHash = new MD5Hash(new byte[]{-1,-2,-3,-4,-5,-6,-7,-8,
-9,-10,-11,-12, -13, -14,
-15,-16});
MD5Hash closeHash1 = new MD5Hash(new byte[]{-1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0});
MD5Hash closeHash2 = new MD5Hash(new byte[]{-1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0});
// test i/o
TestWritable.testWritable(md5Hash);
TestWritable.testWritable(md5Hash00);
TestWritable.testWritable(md5HashFF);
// test equals()
assertEquals(md5Hash, md5Hash);
assertEquals(md5Hash00, md5Hash00);
assertEquals(md5HashFF, md5HashFF);
// test compareTo()
assertTrue(md5Hash.compareTo(md5Hash) == 0);
assertTrue(md5Hash00.compareTo(md5Hash) < 0);
assertTrue(md5HashFF.compareTo(md5Hash) > 0);
// test toString and string ctor
assertEquals(md5Hash, new MD5Hash(md5Hash.toString()));
assertEquals(md5Hash00, new MD5Hash(md5Hash00.toString()));
assertEquals(md5HashFF, new MD5Hash(md5HashFF.toString()));
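    // quarterDigest() packs the first four digest bytes big-endian into an
    // int, and halfDigest() the first eight into a long, as the ordered and
    // backward hashes demonstrate.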
assertEquals(0x01020304, orderedHash.quarterDigest());
assertEquals(0xfffefdfc, backwardHash.quarterDigest());
assertEquals(0x0102030405060708L, orderedHash.halfDigest());
assertEquals(0xfffefdfcfbfaf9f8L, backwardHash.halfDigest());
assertTrue("hash collision",
closeHash1.hashCode() != closeHash2.hashCode());
Thread t1 = new Thread() {
@Override
public void run() {
for (int i = 0; i < 100; i++) {
MD5Hash hash = new MD5Hash(DFF);
assertEquals(hash, md5HashFF);
}
}
};
Thread t2 = new Thread() {
@Override
public void run() {
for (int i = 0; i < 100; i++) {
MD5Hash hash = new MD5Hash(D00);
assertEquals(hash, md5Hash00);
}
}
};
t1.start();
t2.start();
t1.join();
t2.join();
}
public void testFactoryReturnsClearedHashes() throws IOException {
// A stream that will throw an IOE after reading some bytes
ByteArrayInputStream failingStream = new ByteArrayInputStream(
"xxxx".getBytes()) {
@Override
public synchronized int read(byte[] b) throws IOException {
int ret = super.read(b);
if (ret <= 0) {
throw new IOException("Injected fault");
}
return ret;
}
};
final String TEST_STRING = "hello";
// Calculate the correct digest for the test string
MD5Hash expectedHash = MD5Hash.digest(TEST_STRING);
// Hashing again should give the same result
assertEquals(expectedHash, MD5Hash.digest(TEST_STRING));
// Try to hash a stream which will fail halfway through
try {
MD5Hash.digest(failingStream);
fail("didnt throw!");
} catch (Exception e) {
// expected
}
// Make sure we get the same result
assertEquals(expectedHash, MD5Hash.digest(TEST_STRING));
}
}
| 5,108 | 32.611842 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.util.Progressable;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
public class TestMapFile {
private static final Path TEST_DIR = new Path(
System.getProperty("test.build.data", "/tmp"),
TestMapFile.class.getSimpleName());
private static Configuration conf = new Configuration();
@Before
public void setup() throws Exception {
LocalFileSystem fs = FileSystem.getLocal(conf);
if (fs.exists(TEST_DIR) && !fs.delete(TEST_DIR, true)) {
Assert.fail("Can't clean up test root dir");
}
fs.mkdirs(TEST_DIR);
}
private static final Progressable defaultProgressable = new Progressable() {
@Override
public void progress() {
}
};
private static final CompressionCodec defaultCodec = new CompressionCodec() {
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return mock(CompressionOutputStream.class);
}
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor) throws IOException {
return mock(CompressionOutputStream.class);
}
@Override
public Class<? extends Compressor> getCompressorType() {
return null;
}
@Override
public Compressor createCompressor() {
return null;
}
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return null;
}
@Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor) throws IOException {
return null;
}
@Override
public Class<? extends Decompressor> getDecompressorType() {
return null;
}
@Override
public Decompressor createDecompressor() {
return null;
}
@Override
public String getDefaultExtension() {
return null;
}
};
private MapFile.Writer createWriter(String fileName,
Class<? extends WritableComparable<?>> keyClass,
Class<? extends Writable> valueClass) throws IOException {
Path dirName = new Path(TEST_DIR, fileName);
MapFile.Writer.setIndexInterval(conf, 4);
return new MapFile.Writer(conf, dirName, MapFile.Writer.keyClass(keyClass),
MapFile.Writer.valueClass(valueClass));
}
private MapFile.Reader createReader(String fileName,
Class<? extends WritableComparable<?>> keyClass) throws IOException {
Path dirName = new Path(TEST_DIR, fileName);
return new MapFile.Reader(dirName, conf,
MapFile.Reader.comparator(new WritableComparator(keyClass)));
}
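  /*
   * Illustrative sketch (not part of the original suite): a MapFile is a
   * directory holding two SequenceFiles, "data" (the sorted records) and
   * "index" (every Nth key with its byte offset; N is the index interval set
   * to 4 in createWriter above). The file name below is hypothetical.
   */
  @Test
  public void testMapFileLayoutSketch() throws IOException {
    FileSystem fs = FileSystem.getLocal(conf);
    try (MapFile.Writer writer =
        createWriter("layoutSketch.mapfile", IntWritable.class, Text.class)) {
      writer.append(new IntWritable(1), new Text("one"));
    }
    Path dir = new Path(TEST_DIR, "layoutSketch.mapfile");
    assertTrue(fs.exists(new Path(dir, MapFile.DATA_FILE_NAME)));
    assertTrue(fs.exists(new Path(dir, MapFile.INDEX_FILE_NAME)));
  }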
/**
* test {@code MapFile.Reader.getClosest()} method
*
*/
@Test
public void testGetClosestOnCurrentApi() throws Exception {
final String TEST_PREFIX = "testGetClosestOnCurrentApi.mapfile";
MapFile.Writer writer = null;
MapFile.Reader reader = null;
try {
writer = createWriter(TEST_PREFIX, Text.class, Text.class);
int FIRST_KEY = 1;
      // Test keys: 1,11,21,...,91
for (int i = FIRST_KEY; i < 100; i += 10) {
Text t = new Text(Integer.toString(i));
writer.append(t, t);
}
writer.close();
reader = createReader(TEST_PREFIX, Text.class);
Text key = new Text("55");
Text value = new Text();
// Test get closest with step forward
Text closest = (Text) reader.getClosest(key, value);
assertEquals(new Text("61"), closest);
// Test get closest with step back
closest = (Text) reader.getClosest(key, value, true);
assertEquals(new Text("51"), closest);
// Test get closest when we pass explicit key
final Text explicitKey = new Text("21");
closest = (Text) reader.getClosest(explicitKey, value);
assertEquals(new Text("21"), explicitKey);
// Test what happens at boundaries. Assert if searching a key that is
// less than first key in the mapfile, that the first key is returned.
key = new Text("00");
closest = (Text) reader.getClosest(key, value);
assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
// Assert that null is returned if key is > last entry in mapfile.
key = new Text("92");
closest = (Text) reader.getClosest(key, value);
assertNull("Not null key in testGetClosestWithNewCode", closest);
// If we were looking for the key before, we should get the last key
closest = (Text) reader.getClosest(key, value, true);
assertEquals(new Text("91"), closest);
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
/**
* test {@code MapFile.Reader.midKey() } method
*/
@Test
public void testMidKeyOnCurrentApi() throws Exception {
    // Write a mapfile of simple data: keys are ints 0..9.
final String TEST_PREFIX = "testMidKeyOnCurrentApi.mapfile";
MapFile.Writer writer = null;
MapFile.Reader reader = null;
try {
writer = createWriter(TEST_PREFIX, IntWritable.class, IntWritable.class);
// 0,1,....9
int SIZE = 10;
for (int i = 0; i < SIZE; i++)
writer.append(new IntWritable(i), new IntWritable(i));
writer.close();
reader = createReader(TEST_PREFIX, IntWritable.class);
assertEquals(new IntWritable((SIZE - 1) / 2), reader.midKey());
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
/**
* test {@code MapFile.Writer.rename()} method
*/
@Test
public void testRename() {
final String NEW_FILE_NAME = "test-new.mapfile";
final String OLD_FILE_NAME = "test-old.mapfile";
MapFile.Writer writer = null;
try {
FileSystem fs = FileSystem.getLocal(conf);
writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
writer.close();
MapFile.rename(fs, new Path(TEST_DIR, OLD_FILE_NAME).toString(),
new Path(TEST_DIR, NEW_FILE_NAME).toString());
MapFile.delete(fs, new Path(TEST_DIR, NEW_FILE_NAME).toString());
} catch (IOException ex) {
fail("testRename error " + ex);
} finally {
IOUtils.cleanup(null, writer);
}
}
/**
* test {@code MapFile.rename()}
* method with throwing {@code IOException}
*/
@Test
public void testRenameWithException() {
final String ERROR_MESSAGE = "Can't rename file";
final String NEW_FILE_NAME = "test-new.mapfile";
final String OLD_FILE_NAME = "test-old.mapfile";
MapFile.Writer writer = null;
try {
FileSystem fs = FileSystem.getLocal(conf);
FileSystem spyFs = spy(fs);
writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
writer.close();
Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
when(spyFs.rename(oldDir, newDir)).thenThrow(
new IOException(ERROR_MESSAGE));
MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
fail("testRenameWithException no exception error !!!");
} catch (IOException ex) {
assertEquals("testRenameWithException invalid IOExceptionMessage !!!",
ex.getMessage(), ERROR_MESSAGE);
} finally {
IOUtils.cleanup(null, writer);
}
}
@Test
public void testRenameWithFalse() {
final String ERROR_MESSAGE = "Could not rename";
final String NEW_FILE_NAME = "test-new.mapfile";
final String OLD_FILE_NAME = "test-old.mapfile";
MapFile.Writer writer = null;
try {
FileSystem fs = FileSystem.getLocal(conf);
FileSystem spyFs = spy(fs);
writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
writer.close();
Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
when(spyFs.rename(oldDir, newDir)).thenReturn(false);
MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
fail("testRenameWithException no exception error !!!");
} catch (IOException ex) {
assertTrue("testRenameWithFalse invalid IOExceptionMessage error !!!", ex
.getMessage().startsWith(ERROR_MESSAGE));
} finally {
IOUtils.cleanup(null, writer);
}
}
/**
* test throwing {@code IOException} in {@code MapFile.Writer} constructor
*/
@Test
public void testWriteWithFailDirCreation() {
String ERROR_MESSAGE = "Mkdirs failed to create directory";
Path dirName = new Path(TEST_DIR, "fail.mapfile");
MapFile.Writer writer = null;
try {
FileSystem fs = FileSystem.getLocal(conf);
FileSystem spyFs = spy(fs);
Path pathSpy = spy(dirName);
when(pathSpy.getFileSystem(conf)).thenReturn(spyFs);
when(spyFs.mkdirs(dirName)).thenReturn(false);
writer = new MapFile.Writer(conf, pathSpy,
MapFile.Writer.keyClass(IntWritable.class),
MapFile.Writer.valueClass(Text.class));
fail("testWriteWithFailDirCreation error !!!");
} catch (IOException ex) {
assertTrue("testWriteWithFailDirCreation ex error !!!", ex.getMessage()
.startsWith(ERROR_MESSAGE));
} finally {
IOUtils.cleanup(null, writer);
}
}
/**
* test {@code MapFile.Reader.finalKey()} method
*/
@Test
public void testOnFinalKey() {
final String TEST_METHOD_KEY = "testOnFinalKey.mapfile";
int SIZE = 10;
MapFile.Writer writer = null;
MapFile.Reader reader = null;
try {
writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
IntWritable.class);
for (int i = 0; i < SIZE; i++)
writer.append(new IntWritable(i), new IntWritable(i));
writer.close();
reader = createReader(TEST_METHOD_KEY, IntWritable.class);
IntWritable expectedKey = new IntWritable(0);
reader.finalKey(expectedKey);
assertEquals("testOnFinalKey not same !!!", expectedKey, new IntWritable(
9));
} catch (IOException ex) {
fail("testOnFinalKey error !!!");
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
/**
* test {@code MapFile.Writer} constructor with key, value
* and validate it with {@code keyClass(), valueClass()} methods
*/
@Test
public void testKeyValueClasses() {
Class<? extends WritableComparable<?>> keyClass = IntWritable.class;
Class<?> valueClass = Text.class;
try {
createWriter("testKeyValueClasses.mapfile", IntWritable.class, Text.class)
.close();
assertNotNull("writer key class null error !!!",
MapFile.Writer.keyClass(keyClass));
assertNotNull("writer value class null error !!!",
MapFile.Writer.valueClass(valueClass));
} catch (IOException ex) {
fail(ex.getMessage());
}
}
/**
* test {@code MapFile.Reader.getClosest() } with wrong class key
*/
@Test
public void testReaderGetClosest() throws Exception {
final String TEST_METHOD_KEY = "testReaderWithWrongKeyClass.mapfile";
MapFile.Writer writer = null;
MapFile.Reader reader = null;
try {
writer = createWriter(TEST_METHOD_KEY, IntWritable.class, Text.class);
for (int i = 0; i < 10; i++)
writer.append(new IntWritable(i), new Text("value" + i));
writer.close();
reader = createReader(TEST_METHOD_KEY, Text.class);
reader.getClosest(new Text("2"), new Text(""));
fail("no excepted exception in testReaderWithWrongKeyClass !!!");
} catch (IOException ex) {
/* Should be thrown to pass the test */
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
/**
* test {@code MapFile.Writer.append() } with wrong key class
*/
@Test
public void testReaderWithWrongValueClass() {
final String TEST_METHOD_KEY = "testReaderWithWrongValueClass.mapfile";
MapFile.Writer writer = null;
try {
writer = createWriter(TEST_METHOD_KEY, IntWritable.class, Text.class);
writer.append(new IntWritable(0), new IntWritable(0));
fail("no excepted exception in testReaderWithWrongKeyClass !!!");
} catch (IOException ex) {
/* Should be thrown to pass the test */
} finally {
IOUtils.cleanup(null, writer);
}
}
/**
* test {@code MapFile.Reader.next(key, value)} for iteration.
*/
@Test
public void testReaderKeyIteration() {
final String TEST_METHOD_KEY = "testReaderKeyIteration.mapfile";
int SIZE = 10;
int ITERATIONS = 5;
MapFile.Writer writer = null;
MapFile.Reader reader = null;
try {
writer = createWriter(TEST_METHOD_KEY, IntWritable.class, Text.class);
int start = 0;
for (int i = 0; i < SIZE; i++)
writer.append(new IntWritable(i), new Text("Value:" + i));
writer.close();
reader = createReader(TEST_METHOD_KEY, IntWritable.class);
// test iteration
Writable startValue = new Text("Value:" + start);
int i = 0;
while (i++ < ITERATIONS) {
IntWritable key = new IntWritable(start);
Writable value = startValue;
while (reader.next(key, value)) {
assertNotNull(key);
assertNotNull(value);
}
reader.reset();
}
assertTrue("reader seek error !!!",
reader.seek(new IntWritable(SIZE / 2)));
assertFalse("reader seek error !!!",
reader.seek(new IntWritable(SIZE * 2)));
} catch (IOException ex) {
fail("reader seek error !!!");
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
/**
* test {@code MapFile.Writer.testFix} method
*/
@Test
public void testFix() {
final String INDEX_LESS_MAP_FILE = "testFix.mapfile";
int PAIR_SIZE = 20;
MapFile.Writer writer = null;
try {
FileSystem fs = FileSystem.getLocal(conf);
Path dir = new Path(TEST_DIR, INDEX_LESS_MAP_FILE);
writer = createWriter(INDEX_LESS_MAP_FILE, IntWritable.class, Text.class);
for (int i = 0; i < PAIR_SIZE; i++)
writer.append(new IntWritable(0), new Text("value"));
writer.close();
File indexFile = new File(".", "." + INDEX_LESS_MAP_FILE + "/index");
boolean isDeleted = false;
if (indexFile.exists())
isDeleted = indexFile.delete();
if (isDeleted)
assertTrue("testFix error !!!",
MapFile.fix(fs, dir, IntWritable.class, Text.class, true, conf) == PAIR_SIZE);
} catch (Exception ex) {
fail("testFix error !!!");
} finally {
IOUtils.cleanup(null, writer);
}
}
/**
* test all available constructor for {@code MapFile.Writer}
*/
@Test
@SuppressWarnings("deprecation")
public void testDeprecatedConstructors() {
String path = new Path(TEST_DIR, "writes.mapfile").toString();
MapFile.Writer writer = null;
MapFile.Reader reader = null;
try {
FileSystem fs = FileSystem.getLocal(conf);
writer = new MapFile.Writer(conf, fs, path,
IntWritable.class, Text.class, CompressionType.RECORD);
assertNotNull(writer);
writer.close();
writer = new MapFile.Writer(conf, fs, path, IntWritable.class,
Text.class, CompressionType.RECORD, defaultProgressable);
assertNotNull(writer);
writer.close();
writer = new MapFile.Writer(conf, fs, path, IntWritable.class,
Text.class, CompressionType.RECORD, defaultCodec, defaultProgressable);
assertNotNull(writer);
writer.close();
writer = new MapFile.Writer(conf, fs, path,
WritableComparator.get(Text.class), Text.class);
assertNotNull(writer);
writer.close();
writer = new MapFile.Writer(conf, fs, path,
WritableComparator.get(Text.class), Text.class,
SequenceFile.CompressionType.RECORD);
assertNotNull(writer);
writer.close();
writer = new MapFile.Writer(conf, fs, path,
WritableComparator.get(Text.class), Text.class,
CompressionType.RECORD, defaultProgressable);
assertNotNull(writer);
writer.close();
reader = new MapFile.Reader(fs, path,
WritableComparator.get(IntWritable.class), conf);
assertNotNull(reader);
assertNotNull("reader key is null !!!", reader.getKeyClass());
assertNotNull("reader value in null", reader.getValueClass());
} catch (IOException e) {
fail(e.getMessage());
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
/**
* test {@code MapFile.Writer} constructor
* with IllegalArgumentException
*
*/
@Test
public void testKeyLessWriterCreation() {
MapFile.Writer writer = null;
try {
writer = new MapFile.Writer(conf, TEST_DIR);
fail("fail in testKeyLessWriterCreation !!!");
} catch (IllegalArgumentException ex) {
} catch (Exception e) {
fail("fail in testKeyLessWriterCreation. Other ex !!!");
} finally {
IOUtils.cleanup(null, writer);
}
}
/**
* test {@code MapFile.Writer} constructor with IOException
*/
@Test
public void testPathExplosionWriterCreation() {
Path path = new Path(TEST_DIR, "testPathExplosionWriterCreation.mapfile");
String TEST_ERROR_MESSAGE = "Mkdirs failed to create directory "
+ path.getName();
MapFile.Writer writer = null;
try {
FileSystem fsSpy = spy(FileSystem.get(conf));
Path pathSpy = spy(path);
when(fsSpy.mkdirs(path)).thenThrow(new IOException(TEST_ERROR_MESSAGE));
when(pathSpy.getFileSystem(conf)).thenReturn(fsSpy);
writer = new MapFile.Writer(conf, pathSpy,
MapFile.Writer.keyClass(IntWritable.class),
MapFile.Writer.valueClass(IntWritable.class));
fail("fail in testPathExplosionWriterCreation !!!");
} catch (IOException ex) {
assertEquals("testPathExplosionWriterCreation ex message error !!!",
ex.getMessage(), TEST_ERROR_MESSAGE);
} catch (Exception e) {
fail("fail in testPathExplosionWriterCreation. Other ex !!!");
} finally {
IOUtils.cleanup(null, writer);
}
}
/**
* test {@code MapFile.Writer.append} method with desc order
*/
@Test
public void testDescOrderWithThrowExceptionWriterAppend() {
MapFile.Writer writer = null;
try {
writer = createWriter(".mapfile", IntWritable.class, Text.class);
writer.append(new IntWritable(2), new Text("value: " + 1));
writer.append(new IntWritable(2), new Text("value: " + 2));
writer.append(new IntWritable(2), new Text("value: " + 4));
writer.append(new IntWritable(1), new Text("value: " + 3));
fail("testDescOrderWithThrowExceptionWriterAppend not expected exception error !!!");
} catch (IOException ex) {
} catch (Exception e) {
fail("testDescOrderWithThrowExceptionWriterAppend other ex throw !!!");
} finally {
IOUtils.cleanup(null, writer);
}
}
@Test
public void testMainMethodMapFile() {
String inFile = "mainMethodMapFile.mapfile";
String path = new Path(TEST_DIR, inFile).toString();
String[] args = { path, path };
MapFile.Writer writer = null;
try {
writer = createWriter(inFile, IntWritable.class, Text.class);
writer.append(new IntWritable(1), new Text("test_text1"));
writer.append(new IntWritable(2), new Text("test_text2"));
writer.close();
MapFile.main(args);
} catch (Exception ex) {
fail("testMainMethodMapFile error !!!");
} finally {
IOUtils.cleanup(null, writer);
}
}
/**
* Test getClosest feature.
*
* @throws Exception
*/
@Test
@SuppressWarnings("deprecation")
public void testGetClosest() throws Exception {
    // Write a mapfile of simple data: keys are two-digit strings "10","20",...,"90".
Path dirName = new Path(TEST_DIR, "testGetClosest.mapfile");
FileSystem fs = FileSystem.getLocal(conf);
Path qualifiedDirName = fs.makeQualified(dirName);
// Make an index entry for every third insertion.
MapFile.Writer.setIndexInterval(conf, 3);
MapFile.Writer writer = null;
MapFile.Reader reader = null;
try {
writer = new MapFile.Writer(conf, fs, qualifiedDirName.toString(),
Text.class, Text.class);
      // Assert that the index interval is 3
assertEquals(3, writer.getIndexInterval());
// Add entries up to 100 in intervals of ten.
final int FIRST_KEY = 10;
for (int i = FIRST_KEY; i < 100; i += 10) {
String iStr = Integer.toString(i);
Text t = new Text("00".substring(iStr.length()) + iStr);
writer.append(t, t);
}
writer.close();
// Now do getClosest on created mapfile.
reader = new MapFile.Reader(qualifiedDirName, conf);
Text key = new Text("55");
Text value = new Text();
Text closest = (Text) reader.getClosest(key, value);
// Assert that closest after 55 is 60
assertEquals(new Text("60"), closest);
// Get closest that falls before the passed key: 50
closest = (Text) reader.getClosest(key, value, true);
assertEquals(new Text("50"), closest);
// Test get closest when we pass explicit key
final Text TWENTY = new Text("20");
closest = (Text) reader.getClosest(TWENTY, value);
assertEquals(TWENTY, closest);
closest = (Text) reader.getClosest(TWENTY, value, true);
assertEquals(TWENTY, closest);
// Test what happens at boundaries. Assert if searching a key that is
// less than first key in the mapfile, that the first key is returned.
key = new Text("00");
closest = (Text) reader.getClosest(key, value);
assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
// If we're looking for the first key before, and we pass in a key before
// the first key in the file, we should get null
closest = (Text) reader.getClosest(key, value, true);
assertNull(closest);
// Assert that null is returned if key is > last entry in mapfile.
key = new Text("99");
closest = (Text) reader.getClosest(key, value);
assertNull(closest);
// If we were looking for the key before, we should get the last key
closest = (Text) reader.getClosest(key, value, true);
assertEquals(new Text("90"), closest);
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
@Test
@SuppressWarnings("deprecation")
public void testMidKey() throws Exception {
    // Write a mapfile with a single key/value pair.
Path dirName = new Path(TEST_DIR, "testMidKey.mapfile");
FileSystem fs = FileSystem.getLocal(conf);
Path qualifiedDirName = fs.makeQualified(dirName);
MapFile.Writer writer = null;
MapFile.Reader reader = null;
try {
writer = new MapFile.Writer(conf, fs, qualifiedDirName.toString(),
IntWritable.class, IntWritable.class);
writer.append(new IntWritable(1), new IntWritable(1));
writer.close();
// Now do getClosest on created mapfile.
reader = new MapFile.Reader(qualifiedDirName, conf);
assertEquals(new IntWritable(1), reader.midKey());
} finally {
IOUtils.cleanup(null, writer, reader);
}
}
@Test
@SuppressWarnings("deprecation")
public void testMidKeyEmpty() throws Exception {
    // Write an empty mapfile.
Path dirName = new Path(TEST_DIR, "testMidKeyEmpty.mapfile");
FileSystem fs = FileSystem.getLocal(conf);
Path qualifiedDirName = fs.makeQualified(dirName);
MapFile.Writer writer = new MapFile.Writer(conf, fs,
qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
writer.close();
// Now do getClosest on created mapfile.
MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
try {
assertEquals(null, reader.midKey());
} finally {
reader.close();
}
}
@Test
public void testMerge() throws Exception {
final String TEST_METHOD_KEY = "testMerge.mapfile";
int SIZE = 10;
int ITERATIONS = 5;
Path[] in = new Path[5];
List<Integer> expected = new ArrayList<Integer>();
for (int j = 0; j < 5; j++) {
try (MapFile.Writer writer = createWriter(TEST_METHOD_KEY + "." + j,
IntWritable.class, Text.class)) {
in[j] = new Path(TEST_DIR, TEST_METHOD_KEY + "." + j);
for (int i = 0; i < SIZE; i++) {
expected.add(i + j);
writer.append(new IntWritable(i + j), new Text("Value:" + (i + j)));
}
}
}
// Sort expected values
Collections.sort(expected);
// Merge all 5 files
MapFile.Merger merger = new MapFile.Merger(conf);
merger.merge(in, true, new Path(TEST_DIR, TEST_METHOD_KEY));
try (MapFile.Reader reader = createReader(TEST_METHOD_KEY,
IntWritable.class)) {
int start = 0;
// test iteration
Text startValue = new Text("Value:" + start);
int i = 0;
while (i++ < ITERATIONS) {
Iterator<Integer> expectedIterator = expected.iterator();
IntWritable key = new IntWritable(start);
Text value = startValue;
IntWritable prev = new IntWritable(start);
while (reader.next(key, value)) {
assertTrue("Next key should be always equal or more",
prev.get() <= key.get());
assertEquals(expectedIterator.next().intValue(), key.get());
prev.set(key.get());
}
reader.reset();
}
}
// inputs should be deleted
for (int j = 0; j < in.length; j++) {
Path path = in[j];
assertFalse("inputs should be deleted",
path.getFileSystem(conf).exists(path));
}
}
}
| 27,138 | 33.353165 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
/**
* Test cases for IOUtils.java
*/
public class TestIOUtils {
private static final String TEST_FILE_NAME = "test_file";
@Test
public void testCopyBytesShouldCloseStreamsWhenCloseIsTrue() throws Exception {
InputStream inputStream = Mockito.mock(InputStream.class);
OutputStream outputStream = Mockito.mock(OutputStream.class);
Mockito.doReturn(-1).when(inputStream).read(new byte[1]);
IOUtils.copyBytes(inputStream, outputStream, 1, true);
Mockito.verify(inputStream, Mockito.atLeastOnce()).close();
Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
}
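  /*
   * Illustrative sketch (not part of the original suite): the same close
   * semantics shown with real streams instead of mocks. ByteArrayOutputStream
   * is referenced fully qualified because it is not imported above.
   */
  @Test
  public void testCopyBytesRoundTripSketch() throws Exception {
    byte[] data = new byte[] {1, 2, 3, 4, 5};
    ByteArrayInputStream in = new ByteArrayInputStream(data);
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    // close=true: copyBytes closes both streams once the copy completes
    IOUtils.copyBytes(in, out, 2, true);
    Assert.assertArrayEquals(data, out.toByteArray());
  }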
@Test
  public void testCopyBytesShouldCloseInputStreamWhenOutputStreamCloseThrowsException()
throws Exception {
InputStream inputStream = Mockito.mock(InputStream.class);
OutputStream outputStream = Mockito.mock(OutputStream.class);
Mockito.doReturn(-1).when(inputStream).read(new byte[1]);
Mockito.doThrow(new IOException()).when(outputStream).close();
    try {
      IOUtils.copyBytes(inputStream, outputStream, 1, true);
    } catch (IOException e) {
      // expected: the failure from outputStream.close() propagates; the
      // verifications below only care that both streams were closed
    }
Mockito.verify(inputStream, Mockito.atLeastOnce()).close();
Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
}
@Test
public void testCopyBytesShouldNotCloseStreamsWhenCloseIsFalse()
throws Exception {
InputStream inputStream = Mockito.mock(InputStream.class);
OutputStream outputStream = Mockito.mock(OutputStream.class);
Mockito.doReturn(-1).when(inputStream).read(new byte[1]);
IOUtils.copyBytes(inputStream, outputStream, 1, false);
Mockito.verify(inputStream, Mockito.atMost(0)).close();
Mockito.verify(outputStream, Mockito.atMost(0)).close();
}
@Test
public void testCopyBytesWithCountShouldCloseStreamsWhenCloseIsTrue()
throws Exception {
InputStream inputStream = Mockito.mock(InputStream.class);
OutputStream outputStream = Mockito.mock(OutputStream.class);
Mockito.doReturn(-1).when(inputStream).read(new byte[4096], 0, 1);
IOUtils.copyBytes(inputStream, outputStream, (long) 1, true);
Mockito.verify(inputStream, Mockito.atLeastOnce()).close();
Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
}
@Test
public void testCopyBytesWithCountShouldNotCloseStreamsWhenCloseIsFalse()
throws Exception {
InputStream inputStream = Mockito.mock(InputStream.class);
OutputStream outputStream = Mockito.mock(OutputStream.class);
Mockito.doReturn(-1).when(inputStream).read(new byte[4096], 0, 1);
IOUtils.copyBytes(inputStream, outputStream, (long) 1, false);
Mockito.verify(inputStream, Mockito.atMost(0)).close();
Mockito.verify(outputStream, Mockito.atMost(0)).close();
}
@Test
public void testCopyBytesWithCountShouldThrowOutTheStreamClosureExceptions()
throws Exception {
InputStream inputStream = Mockito.mock(InputStream.class);
OutputStream outputStream = Mockito.mock(OutputStream.class);
Mockito.doReturn(-1).when(inputStream).read(new byte[4096], 0, 1);
Mockito.doThrow(new IOException("Exception in closing the stream")).when(
outputStream).close();
try {
IOUtils.copyBytes(inputStream, outputStream, (long) 1, true);
fail("Should throw out the exception");
} catch (IOException e) {
assertEquals("Not throwing the expected exception.",
"Exception in closing the stream", e.getMessage());
}
Mockito.verify(inputStream, Mockito.atLeastOnce()).close();
Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
}
@Test
public void testWriteFully() throws IOException {
final int INPUT_BUFFER_LEN = 10000;
final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
byte[] input = new byte[INPUT_BUFFER_LEN];
for (int i = 0; i < input.length; i++) {
input[i] = (byte)(i & 0xff);
}
byte[] output = new byte[input.length];
    RandomAccessFile raf = null;
    try {
      raf = new RandomAccessFile(TEST_FILE_NAME, "rw");
      FileChannel fc = raf.getChannel();
      ByteBuffer buf = ByteBuffer.wrap(input);
      IOUtils.writeFully(fc, buf);
      raf.seek(0);
      // readFully guarantees the whole buffer is populated; a bare read()
      // may return fewer bytes than requested
      raf.readFully(output);
      for (int i = 0; i < input.length; i++) {
        assertEquals(input[i], output[i]);
      }
      buf.rewind();
      IOUtils.writeFully(fc, buf, HALFWAY);
      for (int i = 0; i < HALFWAY; i++) {
        assertEquals(input[i], output[i]);
      }
      raf.seek(0);
      raf.readFully(output);
      for (int i = HALFWAY; i < input.length; i++) {
        assertEquals(input[i - HALFWAY], output[i]);
      }
    } finally {
      IOUtils.closeStream(raf);
      File f = new File(TEST_FILE_NAME);
      if (f.exists()) {
        f.delete();
      }
    }
}
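  /*
   * Note on the test above: IOUtils.writeFully(fc, buf) loops until the whole
   * buffer is drained, since a single FileChannel.write() call may be partial.
   * The offset variant writes at the given file position, which is why the
   * second read-back sees the input shifted to start at HALFWAY.
   */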
@Test
public void testWrappedReadForCompressedData() throws IOException {
byte[] buf = new byte[2];
InputStream mockStream = Mockito.mock(InputStream.class);
Mockito.when(mockStream.read(buf, 0, 1)).thenReturn(1);
Mockito.when(mockStream.read(buf, 0, 2)).thenThrow(
new java.lang.InternalError());
try {
assertEquals("Check expected value", 1,
IOUtils.wrappedReadForCompressedData(mockStream, buf, 0, 1));
} catch (IOException ioe) {
fail("Unexpected error while reading");
}
try {
IOUtils.wrappedReadForCompressedData(mockStream, buf, 0, 2);
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Error while reading compressed data", ioe);
}
}
@Test
public void testSkipFully() throws IOException {
byte inArray[] = new byte[] {0, 1, 2, 3, 4};
ByteArrayInputStream in = new ByteArrayInputStream(inArray);
try {
in.mark(inArray.length);
IOUtils.skipFully(in, 2);
IOUtils.skipFully(in, 2);
try {
IOUtils.skipFully(in, 2);
fail("expected to get a PrematureEOFException");
} catch (EOFException e) {
assertEquals("Premature EOF from inputStream " +
"after skipping 1 byte(s).",e.getMessage());
}
in.reset();
try {
IOUtils.skipFully(in, 20);
fail("expected to get a PrematureEOFException");
} catch (EOFException e) {
assertEquals("Premature EOF from inputStream " +
"after skipping 5 byte(s).",e.getMessage());
}
in.reset();
IOUtils.skipFully(in, 5);
try {
IOUtils.skipFully(in, 10);
fail("expected to get a PrematureEOFException");
} catch (EOFException e) {
assertEquals("Premature EOF from inputStream " +
"after skipping 0 byte(s).",e.getMessage());
}
} finally {
in.close();
}
}
private static enum NoEntry3Filter implements FilenameFilter {
INSTANCE;
@Override
public boolean accept(File dir, String name) {
return !name.equals("entry3");
}
}
@Test
public void testListDirectory() throws IOException {
File dir = new File("testListDirectory");
Files.createDirectory(dir.toPath());
try {
Set<String> entries = new HashSet<String>();
entries.add("entry1");
entries.add("entry2");
entries.add("entry3");
for (String entry : entries) {
Files.createDirectory(new File(dir, entry).toPath());
}
List<String> list = IOUtils.listDirectory(dir,
NoEntry3Filter.INSTANCE);
for (String entry : list) {
Assert.assertTrue(entries.remove(entry));
}
Assert.assertTrue(entries.contains("entry3"));
list = IOUtils.listDirectory(dir, null);
for (String entry : list) {
entries.remove(entry);
}
Assert.assertTrue(entries.isEmpty());
} finally {
FileUtils.deleteDirectory(dir);
}
}
}
| 9,154 | 33.942748 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBoundedByteArrayOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.IOException;
import junit.framework.TestCase;
import java.util.Arrays;
import java.util.Random;
/** Unit tests for BoundedByteArrayOutputStream */
public class TestBoundedByteArrayOutputStream extends TestCase {
private static final int SIZE = 1024;
private static final byte[] INPUT = new byte[SIZE];
static {
new Random().nextBytes(INPUT);
}
public void testBoundedStream() throws IOException {
BoundedByteArrayOutputStream stream =
new BoundedByteArrayOutputStream(SIZE);
// Write to the stream, get the data back and check for contents
stream.write(INPUT, 0, SIZE);
assertTrue("Array Contents Mismatch",
Arrays.equals(INPUT, stream.getBuffer()));
// Try writing beyond end of buffer. Should throw an exception
boolean caughtException = false;
try {
stream.write(INPUT[0]);
} catch (Exception e) {
caughtException = true;
}
assertTrue("Writing beyond limit did not throw an exception",
caughtException);
//Reset the stream and try, should succeed
stream.reset();
assertTrue("Limit did not get reset correctly",
(stream.getLimit() == SIZE));
stream.write(INPUT, 0, SIZE);
assertTrue("Array Contents Mismatch",
Arrays.equals(INPUT, stream.getBuffer()));
    // Try writing one more byte, should fail
    caughtException = false;
    try {
      stream.write(INPUT[0]);
    } catch (Exception e) {
      caughtException = true;
    }
    assertTrue("Writing beyond limit did not throw an exception",
        caughtException);
// Reset the stream, but set a lower limit. Writing beyond
// the limit should throw an exception
stream.reset(SIZE - 1);
assertTrue("Limit did not get reset correctly",
(stream.getLimit() == SIZE -1));
caughtException = false;
try {
stream.write(INPUT, 0, SIZE);
} catch (Exception e) {
caughtException = true;
}
assertTrue("Writing beyond limit did not throw an exception",
caughtException);
}
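  /*
   * Illustrative sketch (not part of the original suite): the intended reuse
   * pattern is one backing array, reset() per record, with the limit acting
   * as a hard cap on the serialized size.
   */
  public void testReuseSketch() throws IOException {
    BoundedByteArrayOutputStream stream =
        new BoundedByteArrayOutputStream(SIZE);
    for (int pass = 0; pass < 3; pass++) {
      stream.reset();                // restores the limit, reuses the array
      stream.write(INPUT, 0, SIZE);  // fill exactly up to the limit
      assertTrue("Array Contents Mismatch",
          Arrays.equals(INPUT, stream.getBuffer()));
    }
  }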
static class ResettableBoundedByteArrayOutputStream
extends BoundedByteArrayOutputStream {
public ResettableBoundedByteArrayOutputStream(int capacity) {
super(capacity);
}
public void resetBuffer(byte[] buf, int offset, int length) {
super.resetBuffer(buf, offset, length);
}
}
public void testResetBuffer() throws IOException {
ResettableBoundedByteArrayOutputStream stream =
new ResettableBoundedByteArrayOutputStream(SIZE);
// Write to the stream, get the data back and check for contents
stream.write(INPUT, 0, SIZE);
assertTrue("Array Contents Mismatch",
Arrays.equals(INPUT, stream.getBuffer()));
// Try writing beyond end of buffer. Should throw an exception
boolean caughtException = false;
try {
stream.write(INPUT[0]);
} catch (Exception e) {
caughtException = true;
}
assertTrue("Writing beyond limit did not throw an exception",
caughtException);
//Reset the stream and try, should succeed
byte[] newBuf = new byte[SIZE];
stream.resetBuffer(newBuf, 0, newBuf.length);
assertTrue("Limit did not get reset correctly",
(stream.getLimit() == SIZE));
stream.write(INPUT, 0, SIZE);
assertTrue("Array Contents Mismatch",
Arrays.equals(INPUT, stream.getBuffer()));
// Try writing one more byte, should fail
caughtException = false;
try {
stream.write(INPUT[0]);
} catch (Exception e) {
caughtException = true;
}
assertTrue("Writing beyond limit did not throw an exception",
caughtException);
}
}
| 4,473 | 29.026846 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSetFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import java.util.*;
import java.util.concurrent.atomic.AtomicReference;
import junit.framework.TestCase;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.SequenceFile.CompressionType;
/** Support for flat files of binary key/value pairs. */
public class TestSetFile extends TestCase {
private static final Log LOG = LogFactory.getLog(TestSetFile.class);
private static String FILE =
System.getProperty("test.build.data",".") + "/test.set";
private static Configuration conf = new Configuration();
public TestSetFile(String name) { super(name); }
public void testSetFile() throws Exception {
FileSystem fs = FileSystem.getLocal(conf);
try {
RandomDatum[] data = generate(10000);
writeTest(fs, data, FILE, CompressionType.NONE);
readTest(fs, data, FILE);
writeTest(fs, data, FILE, CompressionType.BLOCK);
readTest(fs, data, FILE);
} finally {
fs.close();
}
}
/**
* test {@code SetFile.Reader} methods
* next(), get() in combination
*/
public void testSetFileAccessMethods() {
try {
FileSystem fs = FileSystem.getLocal(conf);
int size = 10;
writeData(fs, size);
SetFile.Reader reader = createReader(fs);
assertTrue("testSetFileWithConstruction1 error !!!", reader.next(new IntWritable(0)));
      // get() seeks to the requested key and then reads the following entry,
      // which is why it hands back the element after the one asked for (i + 1)
assertEquals("testSetFileWithConstruction2 error !!!", new IntWritable(size/2 + 1), reader.get(new IntWritable(size/2)));
assertNull("testSetFileWithConstruction3 error !!!", reader.get(new IntWritable(size*2)));
} catch (Exception ex) {
fail("testSetFileWithConstruction error !!!");
}
}
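  /*
   * Illustrative sketch (not part of the original suite): membership checks
   * are usually done with seek(), which returns true iff the key is present
   * and avoids the read-ahead surprise of get() noted above.
   */
  public void testSeekMembershipSketch() throws Exception {
    FileSystem fs = FileSystem.getLocal(conf);
    writeData(fs, 10);
    SetFile.Reader reader = createReader(fs);
    try {
      assertTrue("existing key should be found",
          reader.seek(new IntWritable(5)));
      assertFalse("missing key should not be found",
          reader.seek(new IntWritable(42)));
    } finally {
      reader.close();
    }
  }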
private SetFile.Reader createReader(FileSystem fs) throws IOException {
return new SetFile.Reader(fs, FILE,
WritableComparator.get(IntWritable.class), conf);
}
@SuppressWarnings("deprecation")
private void writeData(FileSystem fs, int elementSize) throws IOException {
MapFile.delete(fs, FILE);
SetFile.Writer writer = new SetFile.Writer(fs, FILE, IntWritable.class);
for (int i = 0; i < elementSize; i++)
writer.append(new IntWritable(i));
writer.close();
}
private static RandomDatum[] generate(int count) {
LOG.info("generating " + count + " records in memory");
RandomDatum[] data = new RandomDatum[count];
RandomDatum.Generator generator = new RandomDatum.Generator();
for (int i = 0; i < count; i++) {
generator.next();
data[i] = generator.getValue();
}
LOG.info("sorting " + count + " records");
Arrays.sort(data);
return data;
}
private static void writeTest(FileSystem fs, RandomDatum[] data,
String file, CompressionType compress)
throws IOException {
MapFile.delete(fs, file);
LOG.info("creating with " + data.length + " records");
SetFile.Writer writer =
new SetFile.Writer(conf, fs, file,
WritableComparator.get(RandomDatum.class),
compress);
for (int i = 0; i < data.length; i++)
writer.append(data[i]);
writer.close();
}
private static void readTest(FileSystem fs, RandomDatum[] data, String file)
throws IOException {
RandomDatum v = new RandomDatum();
int sample = (int)Math.sqrt(data.length);
Random random = new Random();
LOG.info("reading " + sample + " records");
SetFile.Reader reader = new SetFile.Reader(fs, file, conf);
for (int i = 0; i < sample; i++) {
if (!reader.seek(data[random.nextInt(data.length)]))
throw new RuntimeException("wrong value at " + i);
}
reader.close();
LOG.info("done reading " + data.length);
}
/** For debugging and testing. */
public static void main(String[] args) throws Exception {
int count = 1024 * 1024;
boolean create = true;
boolean check = true;
String file = FILE;
String compress = "NONE";
String usage = "Usage: TestSetFile [-count N] [-nocreate] [-nocheck] [-compress type] file";
if (args.length == 0) {
System.err.println(usage);
System.exit(-1);
}
int i = 0;
Path fpath=null;
FileSystem fs = null;
try {
for (; i < args.length; i++) { // parse command line
if (args[i] == null) {
continue;
} else if (args[i].equals("-count")) {
count = Integer.parseInt(args[++i]);
} else if (args[i].equals("-nocreate")) {
create = false;
} else if (args[i].equals("-nocheck")) {
check = false;
} else if (args[i].equals("-compress")) {
compress = args[++i];
} else {
// file is required parameter
file = args[i];
fpath=new Path(file);
}
}
fs = fpath.getFileSystem(conf);
LOG.info("count = " + count);
LOG.info("create = " + create);
LOG.info("check = " + check);
LOG.info("compress = " + compress);
LOG.info("file = " + file);
RandomDatum[] data = generate(count);
if (create) {
writeTest(fs, data, file, CompressionType.valueOf(compress));
}
if (check) {
readTest(fs, data, file);
}
} finally {
      if (fs != null) {
        fs.close();
      }
}
}
}
| 6,284 | 31.564767 | 133 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayWritable.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import org.junit.Assert;
import junit.framework.TestCase;
/** Unit tests for ArrayWritable */
public class TestArrayWritable extends TestCase {
static class TextArrayWritable extends ArrayWritable {
public TextArrayWritable() {
super(Text.class);
}
}
public TestArrayWritable(String name) {
super(name);
}
  /**
   * Round-trips a Text array through write()/readFields(). Since
   * TextArrayWritable defines its valueClass, readFields should succeed;
   * if the valueClass were undefined, readFields would fail because it
   * could not instantiate the elements.
   */
public void testThrowUndefinedValueException() throws IOException {
// Get a buffer containing a simple text array
Text[] elements = {new Text("zero"), new Text("one"), new Text("two")};
TextArrayWritable sourceArray = new TextArrayWritable();
sourceArray.set(elements);
// Write it to a normal output buffer
DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer();
sourceArray.write(out);
    // Read the output buffer back into a TextArrayWritable. Since the
    // valueClass is defined, this should succeed
TextArrayWritable destArray = new TextArrayWritable();
in.reset(out.getData(), out.getLength());
destArray.readFields(in);
Writable[] destElements = destArray.get();
    assertEquals(elements.length, destElements.length);
    for (int i = 0; i < elements.length; i++) {
      assertEquals(elements[i], destElements[i]);
    }
}
}
/**
* test {@link ArrayWritable} toArray() method
*/
public void testArrayWritableToArray() {
Text[] elements = {new Text("zero"), new Text("one"), new Text("two")};
TextArrayWritable arrayWritable = new TextArrayWritable();
arrayWritable.set(elements);
Object array = arrayWritable.toArray();
assertTrue("TestArrayWritable testArrayWritableToArray error!!! ", array instanceof Text[]);
Text[] destElements = (Text[]) array;
for (int i = 0; i < elements.length; i++) {
assertEquals(destElements[i], elements[i]);
}
}
/**
* test {@link ArrayWritable} constructor with null
*/
public void testNullArgument() {
try {
Class<? extends Writable> valueClass = null;
new ArrayWritable(valueClass);
fail("testNullArgument error !!!");
} catch (IllegalArgumentException exp) {
//should be for test pass
} catch (Exception e) {
fail("testNullArgument error !!!");
}
}
/**
* test {@link ArrayWritable} constructor with {@code String[]} as a parameter
*/
@SuppressWarnings("deprecation")
public void testArrayWritableStringConstructor() {
String[] original = { "test1", "test2", "test3" };
ArrayWritable arrayWritable = new ArrayWritable(original);
assertEquals("testArrayWritableStringConstructor class error!!!",
UTF8.class, arrayWritable.getValueClass());
Assert.assertArrayEquals("testArrayWritableStringConstructor toString error!!!",
original, arrayWritable.toStrings());
}
}
| 3,810 | 32.725664 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestObjectWritableProtos.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.junit.Test;
import com.google.protobuf.DescriptorProtos;
import com.google.protobuf.Message;
/**
* Test case for the use of Protocol Buffers within ObjectWritable.
*/
public class TestObjectWritableProtos {
@Test
public void testProtoBufs() throws IOException {
doTest(1);
}
@Test
public void testProtoBufs2() throws IOException {
doTest(2);
}
@Test
public void testProtoBufs3() throws IOException {
doTest(3);
}
/**
* Write a protobuf to a buffer 'numProtos' times, and then
* read them back, making sure all data comes through correctly.
*/
private void doTest(int numProtos) throws IOException {
Configuration conf = new Configuration();
DataOutputBuffer out = new DataOutputBuffer();
// Write numProtos protobufs to the buffer
Message[] sent = new Message[numProtos];
for (int i = 0; i < numProtos; i++) {
// Construct a test protocol buffer using one of the
// protos that ships with the protobuf library
Message testProto = DescriptorProtos.EnumValueDescriptorProto.newBuilder()
.setName("test" + i).setNumber(i).build();
ObjectWritable.writeObject(out, testProto,
DescriptorProtos.EnumValueDescriptorProto.class, conf);
sent[i] = testProto;
}
// Read back the data
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
for (int i = 0; i < numProtos; i++) {
Message received = (Message)ObjectWritable.readObject(in, conf);
assertEquals(sent[i], received);
}
}
}
| 2,545 | 30.04878 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestGenericWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
/**
* TestCase for {@link GenericWritable} class.
* @see TestWritable#testWritable(Writable)
*/
public class TestGenericWritable extends TestCase {
private Configuration conf;
public static final String CONF_TEST_KEY = "test.generic.writable";
public static final String CONF_TEST_VALUE = "dummy";
@Override
protected void setUp() throws Exception {
super.setUp();
conf = new Configuration();
//set the configuration parameter
conf.set(CONF_TEST_KEY, CONF_TEST_VALUE);
}
/** Dummy class for testing {@link GenericWritable} */
public static class Foo implements Writable {
private String foo = "foo";
@Override
public void readFields(DataInput in) throws IOException {
foo = Text.readString(in);
}
@Override
public void write(DataOutput out) throws IOException {
Text.writeString(out, foo);
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof Foo))
return false;
return this.foo.equals(((Foo)obj).foo);
}
}
/** Dummy class for testing {@link GenericWritable} */
public static class Bar implements Writable, Configurable {
private int bar = 42; //The Answer to The Ultimate Question Of Life, the Universe and Everything
private Configuration conf = null;
@Override
public void readFields(DataInput in) throws IOException {
bar = in.readInt();
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(bar);
}
@Override
public Configuration getConf() {
return conf;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof Bar))
return false;
return this.bar == ((Bar)obj).bar;
}
}
/** Dummy class for testing {@link GenericWritable} */
public static class Baz extends Bar {
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
//needs a configuration parameter
assertEquals("Configuration is not set for the wrapped object",
CONF_TEST_VALUE, getConf().get(CONF_TEST_KEY));
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
}
}
/** Dummy class for testing {@link GenericWritable} */
public static class FooGenericWritable extends GenericWritable {
@Override
@SuppressWarnings("unchecked")
protected Class<? extends Writable>[] getTypes() {
return new Class[] {Foo.class, Bar.class, Baz.class};
}
@Override
public boolean equals(Object obj) {
      if (!(obj instanceof FooGenericWritable))
return false;
return get().equals(((FooGenericWritable)obj).get());
}
}
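  /*
   * Note: GenericWritable serializes as a single byte -- the index of the
   * wrapped class within getTypes() -- followed by the wrapped instance's own
   * serialization. That is why set() rejects classes that are not registered
   * in getTypes() (see testSet below): there is no index to write for them.
   */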
public void testFooWritable() throws Exception {
System.out.println("Testing Writable wrapped in GenericWritable");
FooGenericWritable generic = new FooGenericWritable();
generic.setConf(conf);
Foo foo = new Foo();
generic.set(foo);
TestWritable.testWritable(generic);
}
public void testBarWritable() throws Exception {
System.out.println("Testing Writable, Configurable wrapped in GenericWritable");
FooGenericWritable generic = new FooGenericWritable();
generic.setConf(conf);
Bar bar = new Bar();
bar.setConf(conf);
generic.set(bar);
//test writing generic writable
FooGenericWritable after
= (FooGenericWritable)TestWritable.testWritable(generic, conf);
//test configuration
System.out.println("Testing if Configuration is passed to wrapped classes");
assertTrue(after.get() instanceof Configurable);
assertNotNull(((Configurable)after.get()).getConf());
}
public void testBazWritable() throws Exception {
System.out.println("Testing for GenericWritable to find class names");
FooGenericWritable generic = new FooGenericWritable();
generic.setConf(conf);
Baz baz = new Baz();
generic.set(baz);
TestWritable.testWritable(generic, conf);
}
public void testSet() throws Exception {
Foo foo = new Foo();
FooGenericWritable generic = new FooGenericWritable();
//exception should not occur
generic.set(foo);
try {
//exception should occur, since IntWritable is not registered
generic = new FooGenericWritable();
generic.set(new IntWritable(1));
fail("Generic writable should have thrown an exception for a Writable not registered");
}catch (RuntimeException e) {
//ignore
}
}
public void testGet() throws Exception {
Foo foo = new Foo();
FooGenericWritable generic = new FooGenericWritable();
generic.set(foo);
assertEquals(foo, generic.get());
}
}
| 5,804 | 30.378378 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSequenceFileSerialization.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile.Reader;
import org.apache.hadoop.io.SequenceFile.Writer;
public class TestSequenceFileSerialization extends TestCase {
private Configuration conf;
private FileSystem fs;
@Override
protected void setUp() throws Exception {
conf = new Configuration();
conf.set("io.serializations",
"org.apache.hadoop.io.serializer.JavaSerialization");
fs = FileSystem.getLocal(conf);
}
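  /*
   * Note: setting "io.serializations" replaces the default serializer list
   * (which normally includes WritableSerialization), so with the value above
   * only Java-serializable keys/values can be used. A configuration that
   * supports both would look like (illustrative):
   *
   *   conf.set("io.serializations",
   *       "org.apache.hadoop.io.serializer.WritableSerialization,"
   *       + "org.apache.hadoop.io.serializer.JavaSerialization");
   */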
@Override
protected void tearDown() throws Exception {
fs.close();
}
public void testJavaSerialization() throws Exception {
Path file = new Path(System.getProperty("test.build.data",".") +
"/testseqser.seq");
fs.delete(file, true);
Writer writer = SequenceFile.createWriter(fs, conf, file, Long.class,
String.class);
writer.append(1L, "one");
writer.append(2L, "two");
writer.close();
Reader reader = new Reader(fs, file, conf);
assertEquals(1L, reader.next((Object) null));
assertEquals("one", reader.getCurrentValue((Object) null));
assertEquals(2L, reader.next((Object) null));
assertEquals("two", reader.getCurrentValue((Object) null));
assertNull(reader.next((Object) null));
reader.close();
}
}
| 2,254 | 31.214286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestVersionedWritable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import java.util.Random;
import junit.framework.TestCase;
/** Unit tests for VersionedWritable. */
public class TestVersionedWritable extends TestCase {
public TestVersionedWritable(String name) { super(name); }
/** Example class used in test cases below. */
public static class SimpleVersionedWritable extends VersionedWritable {
private static final Random RANDOM = new Random();
int state = RANDOM.nextInt();
private static byte VERSION = 1;
@Override
public byte getVersion() {
return VERSION;
}
@Override
public void write(DataOutput out) throws IOException {
super.write(out); // version.
out.writeInt(state);
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in); // version
this.state = in.readInt();
}
public static SimpleVersionedWritable read(DataInput in) throws IOException {
SimpleVersionedWritable result = new SimpleVersionedWritable();
result.readFields(in);
return result;
}
/** Required by test code, below. */
@Override
public boolean equals(Object o) {
if (!(o instanceof SimpleVersionedWritable))
return false;
SimpleVersionedWritable other = (SimpleVersionedWritable)o;
return this.state == other.state;
}
}
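  /*
   * Note: VersionedWritable.write() emits the version byte first, so the wire
   * layout of SimpleVersionedWritable is [version:1 byte][state:4 bytes]. On
   * readFields(), the superclass compares the stored byte against
   * getVersion() and throws VersionMismatchException on any difference --
   * the behaviour exercised by testSimpleVersionedWritableMismatch() below.
   */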
public static class AdvancedVersionedWritable extends SimpleVersionedWritable {
String shortTestString = "Now is the time for all good men to come to the aid of the Party";
String longTestString = "Four score and twenty years ago. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah.";
String compressableTestString =
"Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. " +
"Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. " +
"Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. ";
SimpleVersionedWritable containedObject = new SimpleVersionedWritable();
String[] testStringArray = {"The", "Quick", "Brown", "Fox", "Jumped", "Over", "The", "Lazy", "Dog"};
@Override
public void write(DataOutput out) throws IOException {
super.write(out);
out.writeUTF(shortTestString);
WritableUtils.writeString(out, longTestString);
int comp = WritableUtils.writeCompressedString(out, compressableTestString);
System.out.println("Compression is " + comp + "%");
containedObject.write(out); // Warning if this is a recursive call, you need a null value.
WritableUtils.writeStringArray(out, testStringArray);
}
@Override
public void readFields(DataInput in) throws IOException {
super.readFields(in);
shortTestString = in.readUTF();
longTestString = WritableUtils.readString(in);
compressableTestString = WritableUtils.readCompressedString(in);
containedObject.readFields(in); // Warning if this is a recursive call, you need a null value.
testStringArray = WritableUtils.readStringArray(in);
}
@Override
public boolean equals(Object o) {
      if (!super.equals(o)) {
        return false;
      }
if (!shortTestString.equals(((AdvancedVersionedWritable)o).shortTestString)) { return false;}
if (!longTestString.equals(((AdvancedVersionedWritable)o).longTestString)) { return false;}
if (!compressableTestString.equals(((AdvancedVersionedWritable)o).compressableTestString)) { return false;}
if (testStringArray.length != ((AdvancedVersionedWritable)o).testStringArray.length) { return false;}
      for (int i = 0; i < testStringArray.length; i++) {
if (!testStringArray[i].equals(((AdvancedVersionedWritable)o).testStringArray[i])) {
return false;
}
}
if (!containedObject.equals(((AdvancedVersionedWritable)o).containedObject)) { return false;}
return true;
}
}
/* This one checks that version mismatch is thrown... */
public static class SimpleVersionedWritableV2 extends SimpleVersionedWritable {
static byte VERSION = 2;
@Override
public byte getVersion() {
return VERSION;
}
}
/** Test 1: Check that SimpleVersionedWritable. */
public void testSimpleVersionedWritable() throws Exception {
TestWritable.testWritable(new SimpleVersionedWritable());
}
/** Test 2: Check that AdvancedVersionedWritable Works (well, why wouldn't it!). */
public void testAdvancedVersionedWritable() throws Exception {
TestWritable.testWritable(new AdvancedVersionedWritable());
}
/** Test 3: Check that SimpleVersionedWritable throws an Exception. */
public void testSimpleVersionedWritableMismatch() throws Exception {
TestVersionedWritable.testVersionedWritable(new SimpleVersionedWritable(), new SimpleVersionedWritableV2());
}
/** Utility method for testing VersionedWritables. */
public static void testVersionedWritable(Writable before, Writable after) throws Exception {
DataOutputBuffer dob = new DataOutputBuffer();
before.write(dob);
DataInputBuffer dib = new DataInputBuffer();
dib.reset(dob.getData(), dob.getLength());
try {
after.readFields(dib);
} catch (VersionMismatchException vmme) {
System.out.println("Good, we expected this:" + vmme);
return;
}
throw new Exception("A Version Mismatch Didn't Happen!");
}
}
| 6,234 | 32.164894 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/RandomDatum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
import java.util.Random;
public class RandomDatum implements WritableComparable<RandomDatum> {
private int length;
private byte[] data;
public RandomDatum() {}
public RandomDatum(Random random) {
length = 10 + (int) Math.pow(10.0, random.nextFloat() * 3.0);
data = new byte[length];
random.nextBytes(data);
}
public int getLength() {
return length;
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(length);
out.write(data);
}
@Override
public void readFields(DataInput in) throws IOException {
length = in.readInt();
if (data == null || length > data.length)
data = new byte[length];
in.readFully(data, 0, length);
}
@Override
public int compareTo(RandomDatum o) {
return WritableComparator.compareBytes(this.data, 0, this.length,
o.data, 0, o.length);
}
@Override
  public boolean equals(Object o) {
    if (!(o instanceof RandomDatum)) {
      return false;
    }
    return compareTo((RandomDatum) o) == 0;
  }
@Override
public int hashCode() {
return Arrays.hashCode(this.data);
}
private static final char[] HEX_DIGITS =
{'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
/** Returns a string representation of this object. */
@Override
public String toString() {
StringBuilder buf = new StringBuilder(length*2);
for (int i = 0; i < length; i++) {
int b = data[i];
buf.append(HEX_DIGITS[(b >> 4) & 0xf]);
buf.append(HEX_DIGITS[b & 0xf]);
}
return buf.toString();
}
public static class Generator {
Random random;
private RandomDatum key;
private RandomDatum value;
public Generator() { random = new Random(); }
public Generator(int seed) { random = new Random(seed); }
public RandomDatum getKey() { return key; }
public RandomDatum getValue() { return value; }
public void next() {
key = new RandomDatum(random);
value = new RandomDatum(random);
}
}
/** A WritableComparator optimized for RandomDatum. */
public static class Comparator extends WritableComparator {
public Comparator() {
super(RandomDatum.class);
}
@Override
public int compare(byte[] b1, int s1, int l1,
byte[] b2, int s2, int l2) {
int n1 = readInt(b1, s1);
int n2 = readInt(b2, s2);
return compareBytes(b1, s1+4, n1, b2, s2+4, n2);
}
}
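  /*
   * Illustrative sketch (assumption: intentionally not wired up here): a raw
   * comparator is normally registered once so that sort code can compare
   * serialized records without deserializing them, e.g.
   *
   *   static {
   *     WritableComparator.define(RandomDatum.class, new Comparator());
   *   }
   */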
}
| 3,369 | 26.398374 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeek.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import java.util.Random;
import java.util.StringTokenizer;
import junit.framework.TestCase;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.file.tfile.RandomDistribution.DiscreteRNG;
import org.apache.hadoop.io.file.tfile.TFile.Reader;
import org.apache.hadoop.io.file.tfile.TFile.Writer;
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner;
/**
* test the performance for seek.
*
*/
public class TestTFileSeek extends TestCase {
private MyOptions options;
private Configuration conf;
private Path path;
private FileSystem fs;
private NanoTimer timer;
private Random rng;
private DiscreteRNG keyLenGen;
private KVGenerator kvGen;
@Override
public void setUp() throws IOException {
if (options == null) {
options = new MyOptions(new String[0]);
}
conf = new Configuration();
conf.setInt("tfile.fs.input.buffer.size", options.fsInputBufferSize);
conf.setInt("tfile.fs.output.buffer.size", options.fsOutputBufferSize);
path = new Path(new Path(options.rootDir), options.file);
fs = path.getFileSystem(conf);
timer = new NanoTimer(false);
rng = new Random(options.seed);
keyLenGen =
new RandomDistribution.Zipf(new Random(rng.nextLong()),
options.minKeyLen, options.maxKeyLen, 1.2);
DiscreteRNG valLenGen =
new RandomDistribution.Flat(new Random(rng.nextLong()),
options.minValLength, options.maxValLength);
DiscreteRNG wordLenGen =
new RandomDistribution.Flat(new Random(rng.nextLong()),
options.minWordLen, options.maxWordLen);
kvGen =
new KVGenerator(rng, true, keyLenGen, valLenGen, wordLenGen,
options.dictSize);
}
@Override
public void tearDown() throws IOException {
fs.delete(path, true);
}
private static FSDataOutputStream createFSOutput(Path name, FileSystem fs)
throws IOException {
if (fs.exists(name)) {
fs.delete(name, true);
}
FSDataOutputStream fout = fs.create(name);
return fout;
}
private void createTFile() throws IOException {
long totalBytes = 0;
FSDataOutputStream fout = createFSOutput(path, fs);
try {
Writer writer =
new Writer(fout, options.minBlockSize, options.compress, "memcmp",
conf);
try {
BytesWritable key = new BytesWritable();
BytesWritable val = new BytesWritable();
timer.start();
for (long i = 0; true; ++i) {
if (i % 1000 == 0) { // test the size for every 1000 rows.
if (fs.getFileStatus(path).getLen() >= options.fileSize) {
break;
}
}
kvGen.next(key, val, false);
writer.append(key.get(), 0, key.getSize(), val.get(), 0, val
.getSize());
totalBytes += key.getSize();
totalBytes += val.getSize();
}
timer.stop();
}
finally {
writer.close();
}
}
finally {
fout.close();
}
double duration = (double)timer.read()/1000; // in us.
long fsize = fs.getFileStatus(path).getLen();
System.out.printf(
"time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
timer.toString(), (double) totalBytes / 1024 / 1024, totalBytes
/ duration);
System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}
public void seekTFile() throws IOException {
int miss = 0;
long totalBytes = 0;
FSDataInputStream fsdis = fs.open(path);
Reader reader =
new Reader(fsdis, fs.getFileStatus(path).getLen(), conf);
KeySampler kSampler =
new KeySampler(rng, reader.getFirstKey(), reader.getLastKey(),
keyLenGen);
Scanner scanner = reader.createScanner();
BytesWritable key = new BytesWritable();
BytesWritable val = new BytesWritable();
timer.reset();
timer.start();
for (int i = 0; i < options.seekCount; ++i) {
kSampler.next(key);
scanner.lowerBound(key.get(), 0, key.getSize());
if (!scanner.atEnd()) {
scanner.entry().get(key, val);
totalBytes += key.getSize();
totalBytes += val.getSize();
}
else {
++miss;
}
}
timer.stop();
double duration = (double) timer.read() / 1000; // in us.
System.out.printf(
"time: %s...avg seek: %s...%d hit...%d miss...avg I/O size: %.2fKB\n",
timer.toString(), NanoTimer.nanoTimeToString(timer.read()
/ options.seekCount), options.seekCount - miss, miss,
(double) totalBytes / 1024 / (options.seekCount - miss));
}
public void testSeeks() throws IOException {
String[] supported = TFile.getSupportedCompressionAlgorithms();
boolean proceed = false;
for (String c : supported) {
if (c.equals(options.compress)) {
proceed = true;
break;
}
}
if (!proceed) {
System.out.println("Skipped for " + options.compress);
return;
}
if (options.doCreate()) {
createTFile();
}
if (options.doRead()) {
seekTFile();
}
}
private static class IntegerRange {
private final int from, to;
public IntegerRange(int from, int to) {
this.from = from;
this.to = to;
}
public static IntegerRange parse(String s) throws ParseException {
StringTokenizer st = new StringTokenizer(s, " \t,");
if (st.countTokens() != 2) {
throw new ParseException("Bad integer specification: " + s);
}
int from = Integer.parseInt(st.nextToken());
int to = Integer.parseInt(st.nextToken());
return new IntegerRange(from, to);
}
public int from() {
return from;
}
public int to() {
return to;
}
}
private static class MyOptions {
// hard coded constants
int dictSize = 1000;
int minWordLen = 5;
int maxWordLen = 20;
int osInputBufferSize = 64 * 1024;
int osOutputBufferSize = 64 * 1024;
int fsInputBufferSizeNone = 0;
int fsInputBufferSizeLzo = 0;
int fsInputBufferSizeGz = 0;
int fsOutputBufferSizeNone = 1;
int fsOutputBufferSizeLzo = 1;
int fsOutputBufferSizeGz = 1;
String rootDir =
System.getProperty("test.build.data", "/tmp/tfile-test");
String file = "TestTFileSeek";
String compress = "gz";
int minKeyLen = 10;
int maxKeyLen = 50;
int minValLength = 100;
int maxValLength = 200;
int minBlockSize = 64 * 1024;
int fsOutputBufferSize = 1;
int fsInputBufferSize = 0;
long fileSize = 3 * 1024 * 1024;
long seekCount = 1000;
long seed;
static final int OP_CREATE = 1;
static final int OP_READ = 2;
int op = OP_CREATE | OP_READ;
boolean proceed = false;
public MyOptions(String[] args) {
seed = System.nanoTime();
try {
Options opts = buildOptions();
CommandLineParser parser = new GnuParser();
CommandLine line = parser.parse(opts, args, true);
processOptions(line, opts);
validateOptions();
}
catch (ParseException e) {
System.out.println(e.getMessage());
System.out.println("Try \"--help\" option for details.");
setStopProceed();
}
}
public boolean proceed() {
return proceed;
}
private Options buildOptions() {
Option compress =
OptionBuilder.withLongOpt("compress").withArgName("[none|lzo|gz]")
.hasArg().withDescription("compression scheme").create('c');
Option fileSize =
OptionBuilder.withLongOpt("file-size").withArgName("size-in-MB")
.hasArg().withDescription("target size of the file (in MB).")
.create('s');
Option fsInputBufferSz =
OptionBuilder.withLongOpt("fs-input-buffer").withArgName("size")
.hasArg().withDescription(
"size of the file system input buffer (in bytes).").create(
'i');
Option fsOutputBufferSize =
OptionBuilder.withLongOpt("fs-output-buffer").withArgName("size")
.hasArg().withDescription(
"size of the file system output buffer (in bytes).").create(
'o');
Option keyLen =
OptionBuilder
.withLongOpt("key-length")
.withArgName("min,max")
.hasArg()
.withDescription(
"the length range of the key (in bytes)")
.create('k');
Option valueLen =
OptionBuilder
.withLongOpt("value-length")
.withArgName("min,max")
.hasArg()
.withDescription(
"the length range of the value (in bytes)")
.create('v');
Option blockSz =
OptionBuilder.withLongOpt("block").withArgName("size-in-KB").hasArg()
.withDescription("minimum block size (in KB)").create('b');
Option seed =
OptionBuilder.withLongOpt("seed").withArgName("long-int").hasArg()
.withDescription("specify the seed").create('S');
Option operation =
OptionBuilder.withLongOpt("operation").withArgName("r|w|rw").hasArg()
.withDescription(
"action: seek-only, create-only, seek-after-create").create(
'x');
Option rootDir =
OptionBuilder.withLongOpt("root-dir").withArgName("path").hasArg()
.withDescription(
"specify root directory where files will be created.")
.create('r');
Option file =
OptionBuilder.withLongOpt("file").withArgName("name").hasArg()
.withDescription("specify the file name to be created or read.")
.create('f');
Option seekCount =
OptionBuilder
.withLongOpt("seek")
.withArgName("count")
.hasArg()
.withDescription(
"specify how many seek operations we perform (requires -x r or -x rw.")
.create('n');
Option help =
OptionBuilder.withLongOpt("help").hasArg(false).withDescription(
"show this screen").create("h");
      // "seed" must be registered here as well, otherwise -S is silently ignored
      return new Options().addOption(compress).addOption(fileSize).addOption(
          fsInputBufferSz).addOption(fsOutputBufferSize).addOption(keyLen)
          .addOption(blockSz).addOption(rootDir).addOption(valueLen).addOption(
          operation).addOption(seekCount).addOption(file).addOption(seed)
          .addOption(help);
}
private void processOptions(CommandLine line, Options opts)
throws ParseException {
// --help -h and --version -V must be processed first.
if (line.hasOption('h')) {
HelpFormatter formatter = new HelpFormatter();
System.out.println("TFile and SeqFile benchmark.");
System.out.println();
formatter.printHelp(100,
"java ... TestTFileSeqFileComparison [options]",
"\nSupported options:", opts, "");
return;
}
if (line.hasOption('c')) {
compress = line.getOptionValue('c');
}
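      // Note: no "-d" option is registered in buildOptions(), so the branch
      // below is effectively dead until a dictSize option is added there.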
if (line.hasOption('d')) {
dictSize = Integer.parseInt(line.getOptionValue('d'));
}
if (line.hasOption('s')) {
fileSize = Long.parseLong(line.getOptionValue('s')) * 1024 * 1024;
}
if (line.hasOption('i')) {
fsInputBufferSize = Integer.parseInt(line.getOptionValue('i'));
}
if (line.hasOption('o')) {
fsOutputBufferSize = Integer.parseInt(line.getOptionValue('o'));
}
if (line.hasOption('n')) {
seekCount = Integer.parseInt(line.getOptionValue('n'));
}
if (line.hasOption('k')) {
IntegerRange ir = IntegerRange.parse(line.getOptionValue('k'));
minKeyLen = ir.from();
maxKeyLen = ir.to();
}
if (line.hasOption('v')) {
IntegerRange ir = IntegerRange.parse(line.getOptionValue('v'));
minValLength = ir.from();
maxValLength = ir.to();
}
if (line.hasOption('b')) {
minBlockSize = Integer.parseInt(line.getOptionValue('b')) * 1024;
}
if (line.hasOption('r')) {
rootDir = line.getOptionValue('r');
}
if (line.hasOption('f')) {
file = line.getOptionValue('f');
}
if (line.hasOption('S')) {
seed = Long.parseLong(line.getOptionValue('S'));
}
if (line.hasOption('x')) {
String strOp = line.getOptionValue('x');
if (strOp.equals("r")) {
op = OP_READ;
}
else if (strOp.equals("w")) {
op = OP_CREATE;
}
else if (strOp.equals("rw")) {
op = OP_CREATE | OP_READ;
}
else {
throw new ParseException("Unknown action specifier: " + strOp);
}
}
proceed = true;
}
private void validateOptions() throws ParseException {
if (!compress.equals("none") && !compress.equals("lzo")
&& !compress.equals("gz")) {
throw new ParseException("Unknown compression scheme: " + compress);
}
if (minKeyLen >= maxKeyLen) {
throw new ParseException(
"Max key length must be greater than min key length.");
}
if (minValLength >= maxValLength) {
throw new ParseException(
"Max value length must be greater than min value length.");
}
if (minWordLen >= maxWordLen) {
throw new ParseException(
"Max word length must be greater than min word length.");
}
return;
}
private void setStopProceed() {
proceed = false;
}
public boolean doCreate() {
return (op & OP_CREATE) != 0;
}
public boolean doRead() {
return (op & OP_READ) != 0;
}
}
public static void main(String[] argv) throws IOException {
TestTFileSeek testCase = new TestTFileSeek();
MyOptions options = new MyOptions(argv);
    if (!options.proceed()) {
return;
}
testCase.options = options;
testCase.setUp();
testCase.testSeeks();
testCase.tearDown();
}
}
| 15,667 | 30.025743 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSplit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import java.util.Random;
import org.junit.Assert;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.file.tfile.TFile.Reader;
import org.apache.hadoop.io.file.tfile.TFile.Writer;
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner;
public class TestTFileSplit extends TestCase {
private static String ROOT =
System.getProperty("test.build.data", "/tmp/tfile-test");
private final static int BLOCK_SIZE = 64 * 1024;
private static final String KEY = "key";
private static final String VALUE = "value";
private FileSystem fs;
private Configuration conf;
private Path path;
private Random random = new Random();
private String comparator = "memcmp";
private String outputFile = "TestTFileSplit";
void createFile(int count, String compress) throws IOException {
conf = new Configuration();
path = new Path(ROOT, outputFile + "." + compress);
fs = path.getFileSystem(conf);
FSDataOutputStream out = fs.create(path);
Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);
int nx;
for (nx = 0; nx < count; nx++) {
byte[] key = composeSortedKey(KEY, count, nx).getBytes();
byte[] value = (VALUE + nx).getBytes();
writer.append(key, value);
}
writer.close();
out.close();
}
void readFile() throws IOException {
long fileLength = fs.getFileStatus(path).getLen();
int numSplit = 10;
long splitSize = fileLength / numSplit + 1;
Reader reader =
new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
long offset = 0;
long rowCount = 0;
BytesWritable key, value;
for (int i = 0; i < numSplit; ++i, offset += splitSize) {
Scanner scanner = reader.createScannerByByteRange(offset, splitSize);
int count = 0;
key = new BytesWritable();
value = new BytesWritable();
while (!scanner.atEnd()) {
scanner.entry().get(key, value);
++count;
scanner.advance();
}
scanner.close();
Assert.assertTrue(count > 0);
rowCount += count;
}
Assert.assertEquals(rowCount, reader.getEntryCount());
reader.close();
}
/* Similar to readFile(), tests the scanner created
* by record numbers rather than the offsets.
*/
void readRowSplits(int numSplits) throws IOException {
Reader reader =
new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
long totalRecords = reader.getEntryCount();
for (int i=0; i<numSplits; i++) {
long startRec = i*totalRecords/numSplits;
long endRec = (i+1)*totalRecords/numSplits;
if (i == numSplits-1) {
endRec = totalRecords;
}
Scanner scanner = reader.createScannerByRecordNum(startRec, endRec);
int count = 0;
BytesWritable key = new BytesWritable();
BytesWritable value = new BytesWritable();
long x=startRec;
while (!scanner.atEnd()) {
assertEquals("Incorrect RecNum returned by scanner", scanner.getRecordNum(), x);
scanner.entry().get(key, value);
++count;
assertEquals("Incorrect RecNum returned by scanner", scanner.getRecordNum(), x);
scanner.advance();
++x;
}
scanner.close();
Assert.assertTrue(count == (endRec - startRec));
}
// make sure specifying range at the end gives zero records.
Scanner scanner = reader.createScannerByRecordNum(totalRecords, -1);
Assert.assertTrue(scanner.atEnd());
}
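  /*
   * Worked example of the split arithmetic above: with totalRecords = 10 and
   * numSplits = 3, the (startRec, endRec) ranges are (0,3), (3,6) and (6,10).
   * Integer division keeps the ranges contiguous and non-overlapping, and the
   * last split is stretched to totalRecords to absorb the remainder.
   */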
static String composeSortedKey(String prefix, int total, int value) {
return String.format("%s%010d", prefix, value);
}
void checkRecNums() throws IOException {
long fileLen = fs.getFileStatus(path).getLen();
Reader reader = new Reader(fs.open(path), fileLen, conf);
long totalRecs = reader.getEntryCount();
long begin = random.nextLong() % (totalRecs / 2);
if (begin < 0)
begin += (totalRecs / 2);
long end = random.nextLong() % (totalRecs / 2);
if (end < 0)
end += (totalRecs / 2);
end += (totalRecs / 2) + 1;
assertEquals("RecNum for offset=0 should be 0", 0, reader
.getRecordNumNear(0));
for (long x : new long[] { fileLen, fileLen + 1, 2 * fileLen }) {
assertEquals("RecNum for offset>=fileLen should be total entries",
totalRecs, reader.getRecordNumNear(x));
}
    for (long i = 0; i < 100; ++i) {
      assertEquals("Location to RecNum conversion not symmetric", i, reader
          .getRecordNumByLocation(reader.getLocationByRecordNum(i)));
    }
    for (long i = 1; i < 100; ++i) {
      long x = totalRecs - i;
      assertEquals("Location to RecNum conversion not symmetric", x, reader
          .getRecordNumByLocation(reader.getLocationByRecordNum(x)));
    }
    for (long i = begin; i < end; ++i) {
      assertEquals("Location to RecNum conversion not symmetric", i, reader
          .getRecordNumByLocation(reader.getLocationByRecordNum(i)));
    }
    for (int i = 0; i < 1000; ++i) {
      long x = random.nextLong() % totalRecs;
      if (x < 0) x += totalRecs;
      assertEquals("Location to RecNum conversion not symmetric", x, reader
          .getRecordNumByLocation(reader.getLocationByRecordNum(x)));
    }
    reader.close();
  }
public void testSplit() throws IOException {
System.out.println("testSplit");
createFile(100000, Compression.Algorithm.NONE.getName());
checkRecNums();
readFile();
readRowSplits(10);
fs.delete(path, true);
createFile(500000, Compression.Algorithm.GZ.getName());
checkRecNums();
readFile();
readRowSplits(83);
fs.delete(path, true);
}
}
| 6,682 | 33.626943 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestVLong.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import java.util.Random;
import org.junit.Assert;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
public class TestVLong extends TestCase {
private static String ROOT =
System.getProperty("test.build.data", "/tmp/tfile-test");
private Configuration conf;
private FileSystem fs;
private Path path;
private String outputFile = "TestVLong";
@Override
public void setUp() throws IOException {
conf = new Configuration();
path = new Path(ROOT, outputFile);
fs = path.getFileSystem(conf);
if (fs.exists(path)) {
fs.delete(path, false);
}
}
@Override
public void tearDown() throws IOException {
if (fs.exists(path)) {
fs.delete(path, false);
}
}
public void testVLongByte() throws IOException {
FSDataOutputStream out = fs.create(path);
for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; ++i) {
Utils.writeVLong(out, i);
}
out.close();
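    // Expected size, assuming the TFile VLong encoding stores values in
    // [-32, 127] as a single byte: of the 256 byte values written, the 96
    // values in [-128, -33] take two bytes each, hence 256 + 96 bytes total.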
Assert.assertEquals("Incorrect encoded size", (1 << Byte.SIZE) + 96, fs
.getFileStatus(
path).getLen());
FSDataInputStream in = fs.open(path);
for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; ++i) {
long n = Utils.readVLong(in);
Assert.assertEquals(n, i);
}
in.close();
fs.delete(path, false);
}
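  /* Write every short value shifted left by the given number of bits, read
   * each value back for verification, and return the resulting file length
   * so callers can assert on the encoded size. */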
private long writeAndVerify(int shift) throws IOException {
FSDataOutputStream out = fs.create(path);
for (int i = Short.MIN_VALUE; i <= Short.MAX_VALUE; ++i) {
Utils.writeVLong(out, ((long) i) << shift);
}
out.close();
FSDataInputStream in = fs.open(path);
for (int i = Short.MIN_VALUE; i <= Short.MAX_VALUE; ++i) {
long n = Utils.readVLong(in);
Assert.assertEquals(n, ((long) i) << shift);
}
in.close();
long ret = fs.getFileStatus(path).getLen();
fs.delete(path, false);
return ret;
}
public void testVLongShort() throws IOException {
long size = writeAndVerify(0);
Assert.assertEquals("Incorrect encoded size", (1 << Short.SIZE) * 2
+ ((1 << Byte.SIZE) - 40)
* (1 << Byte.SIZE) - 128 - 32, size);
}
public void testVLong3Bytes() throws IOException {
long size = writeAndVerify(Byte.SIZE);
Assert.assertEquals("Incorrect encoded size", (1 << Short.SIZE) * 3
+ ((1 << Byte.SIZE) - 32) * (1 << Byte.SIZE) - 40 - 1, size);
}
public void testVLong4Bytes() throws IOException {
long size = writeAndVerify(Byte.SIZE * 2);
Assert.assertEquals("Incorrect encoded size", (1 << Short.SIZE) * 4
+ ((1 << Byte.SIZE) - 16) * (1 << Byte.SIZE) - 32 - 2, size);
}
public void testVLong5Bytes() throws IOException {
long size = writeAndVerify(Byte.SIZE * 3);
Assert.assertEquals("Incorrect encoded size", (1 << Short.SIZE) * 6 - 256
- 16 - 3, size);
}
private void verifySixOrMoreBytes(int bytes) throws IOException {
long size = writeAndVerify(Byte.SIZE * (bytes - 2));
Assert.assertEquals("Incorrect encoded size", (1 << Short.SIZE)
* (bytes + 1) - 256 - bytes + 1, size);
}
public void testVLong6Bytes() throws IOException {
verifySixOrMoreBytes(6);
}
public void testVLong7Bytes() throws IOException {
verifySixOrMoreBytes(7);
}
public void testVLong8Bytes() throws IOException {
verifySixOrMoreBytes(8);
}
public void testVLongRandom() throws IOException {
int count = 1024 * 1024;
    long[] data = new long[count];
Random rng = new Random();
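    // Build each test value from two random 32-bit halves, then mask it
    // down to a random bit width so that every VLong encoding length gets
    // exercised.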
for (int i = 0; i < data.length; ++i) {
int shift = rng.nextInt(Long.SIZE) + 1;
long mask = (1L << shift) - 1;
long a = ((long) rng.nextInt()) << 32;
long b = ((long) rng.nextInt()) & 0xffffffffL;
data[i] = (a + b) & mask;
}
FSDataOutputStream out = fs.create(path);
for (int i = 0; i < data.length; ++i) {
Utils.writeVLong(out, data[i]);
}
out.close();
FSDataInputStream in = fs.open(path);
for (int i = 0; i < data.length; ++i) {
Assert.assertEquals(Utils.readVLong(in), data[i]);
}
in.close();
fs.delete(path, false);
}
}
| 5,131 | 30.679012 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/KVGenerator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.util.Random;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.file.tfile.RandomDistribution.DiscreteRNG;
/**
* Generate random <key, value> pairs.
*/
class KVGenerator {
private final Random random;
private final byte[][] dict;
private final boolean sorted;
private final DiscreteRNG keyLenRNG, valLenRNG;
private BytesWritable lastKey;
private static final int MIN_KEY_LEN = 4;
private final byte prefix[] = new byte[MIN_KEY_LEN];
public KVGenerator(Random random, boolean sorted, DiscreteRNG keyLenRNG,
DiscreteRNG valLenRNG, DiscreteRNG wordLenRNG, int dictSize) {
this.random = random;
dict = new byte[dictSize][];
this.sorted = sorted;
this.keyLenRNG = keyLenRNG;
this.valLenRNG = valLenRNG;
for (int i = 0; i < dictSize; ++i) {
int wordLen = wordLenRNG.nextInt();
dict[i] = new byte[wordLen];
random.nextBytes(dict[i]);
}
lastKey = new BytesWritable();
fillKey(lastKey);
}
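  /* A minimal usage sketch (hypothetical parameters; the DiscreteRNG
   * instances would come from RandomDistribution, and the dictionary size
   * of 100 is arbitrary):
   *
   *   KVGenerator gen = new KVGenerator(new Random(), true, keyLenRNG,
   *       valLenRNG, wordLenRNG, 100);
   *   BytesWritable key = new BytesWritable();
   *   BytesWritable value = new BytesWritable();
   *   gen.next(key, value, false); // fills key and value with random data
   */
  // Fill a key with random dictionary words behind a 4-byte prefix; when
  // sorted output is requested, the prefix is incremented whenever the new
  // random suffix would sort below the previous key, keeping output ordered.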
private void fillKey(BytesWritable o) {
int len = keyLenRNG.nextInt();
if (len < MIN_KEY_LEN) len = MIN_KEY_LEN;
o.setSize(len);
int n = MIN_KEY_LEN;
while (n < len) {
byte[] word = dict[random.nextInt(dict.length)];
int l = Math.min(word.length, len - n);
System.arraycopy(word, 0, o.get(), n, l);
n += l;
}
if (sorted
&& WritableComparator.compareBytes(lastKey.get(), MIN_KEY_LEN, lastKey
.getSize()
- MIN_KEY_LEN, o.get(), MIN_KEY_LEN, o.getSize() - MIN_KEY_LEN) > 0) {
incrementPrefix();
}
System.arraycopy(prefix, 0, o.get(), 0, MIN_KEY_LEN);
lastKey.set(o);
}
private void fillValue(BytesWritable o) {
int len = valLenRNG.nextInt();
o.setSize(len);
int n = 0;
while (n < len) {
byte[] word = dict[random.nextInt(dict.length)];
int l = Math.min(word.length, len - n);
System.arraycopy(word, 0, o.get(), n, l);
n += l;
}
}
private void incrementPrefix() {
for (int i = MIN_KEY_LEN - 1; i >= 0; --i) {
++prefix[i];
if (prefix[i] != 0) return;
}
throw new RuntimeException("Prefix overflown");
}
public void next(BytesWritable key, BytesWritable value, boolean dupKey) {
if (dupKey) {
key.set(lastKey);
}
else {
fillKey(key);
}
fillValue(value);
}
}
| 3,271 | 29.867925 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileLzoCodecsStreams.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import org.apache.hadoop.io.file.tfile.Compression.Algorithm;
public class TestTFileLzoCodecsStreams extends TestTFileStreams {
/**
   * Test LZO compression codec, using the same test cases as the streams tests.
*/
@Override
public void setUp() throws IOException {
skip = !(Algorithm.LZO.isSupported());
if (skip) {
System.out.println("Skipped");
}
init(Compression.Algorithm.LZO.getName(), "memcmp");
if (!skip)
super.setUp();
}
}
| 1,358 | 32.975 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileNoneCodecsStreams.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
public class TestTFileNoneCodecsStreams extends TestTFileStreams {
/**
   * Test the no-compression codec, using the same test cases as the streams tests.
*/
@Override
public void setUp() throws IOException {
init(Compression.Algorithm.NONE.getName(), "memcmp");
super.setUp();
}
}
| 1,177 | 34.69697 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileComparators.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import org.junit.Assert;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile.Writer;
/**
*
 * Test cases for TFile comparator handling (bad comparator names and
 * classes), using the GZ compression codec.
*
*/
public class TestTFileComparators extends TestCase {
private static String ROOT =
System.getProperty("test.build.data", "/tmp/tfile-test");
private final static int BLOCK_SIZE = 512;
private FileSystem fs;
private Configuration conf;
private Path path;
private FSDataOutputStream out;
private Writer writer;
private String compression = Compression.Algorithm.GZ.getName();
private String outputFile = "TFileTestComparators";
/*
   * pre-sampled numbers of records in one block, based on the generated key
   * and value strings
*/
// private int records1stBlock = 4314;
// private int records2ndBlock = 4108;
private int records1stBlock = 4480;
private int records2ndBlock = 4263;
@Override
public void setUp() throws IOException {
conf = new Configuration();
path = new Path(ROOT, outputFile);
fs = path.getFileSystem(conf);
out = fs.create(path);
}
@Override
public void tearDown() throws IOException {
fs.delete(path, true);
}
// bad comparator format
public void testFailureBadComparatorNames() throws IOException {
try {
writer = new Writer(out, BLOCK_SIZE, compression, "badcmp", conf);
Assert.fail("Failed to catch unsupported comparator names");
}
catch (Exception e) {
// noop, expecting exceptions
e.printStackTrace();
}
}
// jclass that doesn't exist
public void testFailureBadJClassNames() throws IOException {
try {
writer =
new Writer(out, BLOCK_SIZE, compression,
"jclass: some.non.existence.clazz", conf);
Assert.fail("Failed to catch unsupported comparator names");
}
catch (Exception e) {
// noop, expecting exceptions
e.printStackTrace();
}
}
// class exists but not a RawComparator
public void testFailureBadJClasses() throws IOException {
try {
writer =
new Writer(out, BLOCK_SIZE, compression,
"jclass:org.apache.hadoop.io.file.tfile.Chunk", conf);
Assert.fail("Failed to catch unsupported comparator names");
}
catch (Exception e) {
// noop, expecting exceptions
e.printStackTrace();
}
}
private void closeOutput() throws IOException {
if (writer != null) {
writer.close();
writer = null;
}
if (out != null) {
out.close();
out = null;
}
}
}
| 3,658 | 28.747967 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/KeySampler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.file.tfile.RandomDistribution.DiscreteRNG;
class KeySampler {
Random random;
int min, max;
DiscreteRNG keyLenRNG;
private static final int MIN_KEY_LEN = 4;
public KeySampler(Random random, RawComparable first, RawComparable last,
DiscreteRNG keyLenRNG) throws IOException {
this.random = random;
min = keyPrefixToInt(first);
max = keyPrefixToInt(last);
this.keyLenRNG = keyLenRNG;
}
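  // Interpret the first four key bytes as a big-endian int, so that a key
  // prefix can be sampled uniformly between the first and last keys.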
private int keyPrefixToInt(RawComparable key) throws IOException {
byte[] b = key.buffer();
int o = key.offset();
return (b[o] & 0xff) << 24 | (b[o + 1] & 0xff) << 16
| (b[o + 2] & 0xff) << 8 | (b[o + 3] & 0xff);
}
public void next(BytesWritable key) {
key.setSize(Math.max(MIN_KEY_LEN, keyLenRNG.nextInt()));
random.nextBytes(key.get());
int n = random.nextInt(max - min) + min;
byte[] b = key.get();
b[0] = (byte) (n >> 24);
b[1] = (byte) (n >> 16);
b[2] = (byte) (n >> 8);
b[3] = (byte) n;
}
}
| 1,946 | 33.157895 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileUnsortedByteArrays.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import org.junit.Assert;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile.Reader;
import org.apache.hadoop.io.file.tfile.TFile.Writer;
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner;
public class TestTFileUnsortedByteArrays extends TestCase {
private static String ROOT =
System.getProperty("test.build.data", "/tmp/tfile-test");
private final static int BLOCK_SIZE = 512;
private final static int BUF_SIZE = 64;
private FileSystem fs;
private Configuration conf;
private Path path;
private FSDataOutputStream out;
private Writer writer;
private String compression = Compression.Algorithm.GZ.getName();
private String outputFile = "TFileTestUnsorted";
/*
   * pre-sampled numbers of records in one block, based on the generated key
   * and value strings
*/
private int records1stBlock = 4314;
private int records2ndBlock = 4108;
public void init(String compression, String outputFile,
int numRecords1stBlock, int numRecords2ndBlock) {
this.compression = compression;
this.outputFile = outputFile;
this.records1stBlock = numRecords1stBlock;
this.records2ndBlock = numRecords2ndBlock;
}
@Override
public void setUp() throws IOException {
conf = new Configuration();
path = new Path(ROOT, outputFile);
fs = path.getFileSystem(conf);
out = fs.create(path);
writer = new Writer(out, BLOCK_SIZE, compression, null, conf);
writer.append("keyZ".getBytes(), "valueZ".getBytes());
writer.append("keyM".getBytes(), "valueM".getBytes());
writer.append("keyN".getBytes(), "valueN".getBytes());
writer.append("keyA".getBytes(), "valueA".getBytes());
closeOutput();
}
@Override
public void tearDown() throws IOException {
fs.delete(path, true);
}
  // creating a scanner with keys on an unsorted TFile must fail
public void testFailureScannerWithKeys() throws IOException {
Reader reader =
new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Assert.assertFalse(reader.isSorted());
Assert.assertEquals((int) reader.getEntryCount(), 4);
try {
      reader.createScannerByKey("aaa".getBytes(), "zzz".getBytes());
      Assert.fail("Failed to catch creating scanner with keys on an unsorted file.");
}
catch (RuntimeException e) {
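      // noop, expecting the exception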
}
finally {
reader.close();
}
}
// we still can scan records in an unsorted TFile
public void testScan() throws IOException {
Reader reader =
new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Assert.assertFalse(reader.isSorted());
Assert.assertEquals((int) reader.getEntryCount(), 4);
Scanner scanner = reader.createScanner();
try {
// read key and value
byte[] kbuf = new byte[BUF_SIZE];
int klen = scanner.entry().getKeyLength();
scanner.entry().getKey(kbuf);
Assert.assertEquals(new String(kbuf, 0, klen), "keyZ");
byte[] vbuf = new byte[BUF_SIZE];
int vlen = scanner.entry().getValueLength();
scanner.entry().getValue(vbuf);
Assert.assertEquals(new String(vbuf, 0, vlen), "valueZ");
scanner.advance();
// now try get value first
vbuf = new byte[BUF_SIZE];
vlen = scanner.entry().getValueLength();
scanner.entry().getValue(vbuf);
Assert.assertEquals(new String(vbuf, 0, vlen), "valueM");
kbuf = new byte[BUF_SIZE];
klen = scanner.entry().getKeyLength();
scanner.entry().getKey(kbuf);
Assert.assertEquals(new String(kbuf, 0, klen), "keyM");
}
finally {
scanner.close();
reader.close();
}
}
// we still can scan records in an unsorted TFile
public void testScanRange() throws IOException {
Reader reader =
new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Assert.assertFalse(reader.isSorted());
Assert.assertEquals((int) reader.getEntryCount(), 4);
Scanner scanner = reader.createScanner();
try {
// read key and value
byte[] kbuf = new byte[BUF_SIZE];
int klen = scanner.entry().getKeyLength();
scanner.entry().getKey(kbuf);
Assert.assertEquals(new String(kbuf, 0, klen), "keyZ");
byte[] vbuf = new byte[BUF_SIZE];
int vlen = scanner.entry().getValueLength();
scanner.entry().getValue(vbuf);
Assert.assertEquals(new String(vbuf, 0, vlen), "valueZ");
scanner.advance();
// now try get value first
vbuf = new byte[BUF_SIZE];
vlen = scanner.entry().getValueLength();
scanner.entry().getValue(vbuf);
Assert.assertEquals(new String(vbuf, 0, vlen), "valueM");
kbuf = new byte[BUF_SIZE];
klen = scanner.entry().getKeyLength();
scanner.entry().getKey(kbuf);
Assert.assertEquals(new String(kbuf, 0, klen), "keyM");
}
finally {
scanner.close();
reader.close();
}
}
public void testFailureSeek() throws IOException {
Reader reader =
new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner = reader.createScanner();
try {
// can't find ceil
      try {
        scanner.lowerBound("keyN".getBytes());
        Assert.fail("Cannot search in an unsorted TFile!");
      }
      catch (Exception e) {
        // noop, expecting exceptions
      }
      // can't find higher
      try {
        scanner.upperBound("keyA".getBytes());
        Assert.fail("Cannot search higher in an unsorted TFile!");
      }
      catch (Exception e) {
        // noop, expecting exceptions
      }
      // can't seek
      try {
        scanner.seekTo("keyM".getBytes());
        Assert.fail("Cannot seek in an unsorted TFile!");
      }
      catch (Exception e) {
        // noop, expecting exceptions
      }
}
finally {
scanner.close();
reader.close();
}
}
private void closeOutput() throws IOException {
if (writer != null) {
writer.close();
writer = null;
out.close();
out = null;
}
}
}
| 7,163 | 28.974895 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileLzoCodecsByteArrays.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.IOException;
import org.apache.hadoop.io.file.tfile.Compression.Algorithm;
public class TestTFileLzoCodecsByteArrays extends TestTFileByteArrays {
/**
* Test LZO compression codec, using the same test cases as in the ByteArrays.
*/
@Override
public void setUp() throws IOException {
skip = !(Algorithm.LZO.isSupported());
if (skip) {
System.out.println("Skipped");
}
// TODO: sample the generated key/value records, and put the numbers below
init(Compression.Algorithm.LZO.getName(), "memcmp", 2605, 2558);
if (!skip)
super.setUp();
}
}
| 1,455 | 33.666667 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileStreams.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.io.file.tfile;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.util.Random;
import org.junit.Assert;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.file.tfile.TFile.Reader;
import org.apache.hadoop.io.file.tfile.TFile.Writer;
import org.apache.hadoop.io.file.tfile.TFile.Reader.Scanner;
/**
*
* Streaming interfaces test case class using GZ compression codec, base class
* of none and LZO compression classes.
*
*/
public class TestTFileStreams extends TestCase {
private static String ROOT =
System.getProperty("test.build.data", "/tmp/tfile-test");
private final static int BLOCK_SIZE = 512;
private final static int K = 1024;
private final static int M = K * K;
protected boolean skip = false;
private FileSystem fs;
private Configuration conf;
private Path path;
private FSDataOutputStream out;
Writer writer;
private String compression = Compression.Algorithm.GZ.getName();
private String comparator = "memcmp";
private final String outputFile = getClass().getSimpleName();
public void init(String compression, String comparator) {
this.compression = compression;
this.comparator = comparator;
}
@Override
public void setUp() throws IOException {
conf = new Configuration();
path = new Path(ROOT, outputFile);
fs = path.getFileSystem(conf);
out = fs.create(path);
writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
}
@Override
public void tearDown() throws IOException {
if (!skip) {
try {
closeOutput();
} catch (Exception e) {
// no-op
}
fs.delete(path, true);
}
}
public void testNoEntry() throws IOException {
if (skip)
return;
closeOutput();
TestTFileByteArrays.readRecords(fs, path, 0, conf);
}
public void testOneEntryKnownLength() throws IOException {
if (skip)
return;
writeRecords(1, true, true);
TestTFileByteArrays.readRecords(fs, path, 1, conf);
}
public void testOneEntryUnknownLength() throws IOException {
if (skip)
return;
writeRecords(1, false, false);
    // TODO: getValueLength currently throws an exception here, which is
    // inconsistent: getKeyLength returns a correct value even when the
    // initial length is -1.
TestTFileByteArrays.readRecords(fs, path, 1, conf);
}
// known key length, unknown value length
public void testOneEntryMixedLengths1() throws IOException {
if (skip)
return;
writeRecords(1, true, false);
TestTFileByteArrays.readRecords(fs, path, 1, conf);
}
// unknown key length, known value length
public void testOneEntryMixedLengths2() throws IOException {
if (skip)
return;
writeRecords(1, false, true);
TestTFileByteArrays.readRecords(fs, path, 1, conf);
}
public void testTwoEntriesKnownLength() throws IOException {
if (skip)
return;
writeRecords(2, true, true);
TestTFileByteArrays.readRecords(fs, path, 2, conf);
}
// Negative test
public void testFailureAddKeyWithoutValue() throws IOException {
if (skip)
return;
DataOutputStream dos = writer.prepareAppendKey(-1);
dos.write("key0".getBytes());
try {
closeOutput();
fail("Cannot add only a key without a value. ");
}
catch (IllegalStateException e) {
// noop, expecting an exception
}
}
public void testFailureAddValueWithoutKey() throws IOException {
if (skip)
return;
DataOutputStream outValue = null;
try {
outValue = writer.prepareAppendValue(6);
outValue.write("value0".getBytes());
fail("Cannot add a value without adding key first. ");
}
catch (Exception e) {
// noop, expecting an exception
}
finally {
if (outValue != null) {
outValue.close();
}
}
}
public void testFailureOneEntryKnownLength() throws IOException {
if (skip)
return;
DataOutputStream outKey = writer.prepareAppendKey(2);
try {
outKey.write("key0".getBytes());
fail("Specified key length mismatched the actual key length.");
}
catch (IOException e) {
// noop, expecting an exception
}
DataOutputStream outValue = null;
try {
outValue = writer.prepareAppendValue(6);
outValue.write("value0".getBytes());
}
catch (Exception e) {
// noop, expecting an exception
}
}
public void testFailureKeyTooLong() throws IOException {
if (skip)
return;
DataOutputStream outKey = writer.prepareAppendKey(2);
try {
outKey.write("key0".getBytes());
outKey.close();
Assert.fail("Key is longer than requested.");
}
catch (Exception e) {
// noop, expecting an exception
}
}
  public void testFailureValueTooShort() throws IOException {
if (skip)
return;
DataOutputStream outKey = writer.prepareAppendKey(4);
outKey.write("key0".getBytes());
outKey.close();
DataOutputStream outValue = writer.prepareAppendValue(15);
try {
outValue.write("value0".getBytes());
outValue.close();
Assert.fail("Value is shorter than expected.");
}
catch (Exception e) {
// noop, expecting an exception
}
}
public void testFailureValueTooLong() throws IOException {
if (skip)
return;
DataOutputStream outKey = writer.prepareAppendKey(4);
outKey.write("key0".getBytes());
outKey.close();
DataOutputStream outValue = writer.prepareAppendValue(3);
try {
outValue.write("value0".getBytes());
outValue.close();
Assert.fail("Value is longer than expected.");
}
catch (Exception e) {
// noop, expecting an exception
}
try {
outKey.close();
outKey.close();
}
catch (Exception e) {
Assert.fail("Second or more close() should have no effect.");
}
}
  public void testFailureKeyTooShort() throws IOException {
if (skip)
return;
DataOutputStream outKey = writer.prepareAppendKey(8);
try {
outKey.write("key0".getBytes());
outKey.close();
Assert.fail("Key is shorter than expected.");
}
catch (Exception e) {
// noop, expecting an exception
}
}
public void testFailureCloseKeyStreamManyTimesInWriter() throws IOException {
if (skip)
return;
DataOutputStream outKey = writer.prepareAppendKey(4);
try {
outKey.write("key0".getBytes());
outKey.close();
}
catch (Exception e) {
// noop, expecting an exception
}
finally {
try {
outKey.close();
}
catch (Exception e) {
// no-op
}
}
outKey.close();
outKey.close();
Assert.assertTrue("Multiple close should have no effect.", true);
}
public void testFailureKeyLongerThan64K() throws IOException {
if (skip)
return;
try {
DataOutputStream outKey = writer.prepareAppendKey(64 * K + 1);
Assert.fail("Failed to handle key longer than 64K.");
}
catch (IndexOutOfBoundsException e) {
// noop, expecting exceptions
}
closeOutput();
}
public void testFailureKeyLongerThan64K_2() throws IOException {
if (skip)
return;
DataOutputStream outKey = writer.prepareAppendKey(-1);
try {
byte[] buf = new byte[K];
Random rand = new Random();
for (int nx = 0; nx < K + 2; nx++) {
rand.nextBytes(buf);
outKey.write(buf);
}
outKey.close();
Assert.fail("Failed to handle key longer than 64K.");
}
catch (EOFException e) {
// noop, expecting exceptions
}
finally {
try {
closeOutput();
}
catch (Exception e) {
// no-op
}
}
}
public void testFailureNegativeOffset() throws IOException {
if (skip)
return;
writeRecords(2, true, true);
Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
Scanner scanner = reader.createScanner();
byte[] buf = new byte[K];
try {
scanner.entry().getKey(buf, -1);
Assert.fail("Failed to handle key negative offset.");
}
catch (Exception e) {
// noop, expecting exceptions
}
scanner.close();
reader.close();
}
/**
* Verify that the compressed data size is less than raw data size.
*
* @throws IOException
*/
public void testFailureCompressionNotWorking() throws IOException {
if (skip)
return;
long rawDataSize = writeRecords(10000, false, false, false);
if (!compression.equalsIgnoreCase(Compression.Algorithm.NONE.getName())) {
Assert.assertTrue(out.getPos() < rawDataSize);
}
closeOutput();
}
public void testFailureCompressionNotWorking2() throws IOException {
if (skip)
return;
long rawDataSize = writeRecords(10000, true, true, false);
if (!compression.equalsIgnoreCase(Compression.Algorithm.NONE.getName())) {
Assert.assertTrue(out.getPos() < rawDataSize);
}
closeOutput();
}
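  /* Append 'count' records through the streaming interface, declaring key
   * and value lengths up front or leaving them unknown (-1), and return the
   * raw data size: each record contributes a VInt length header plus the
   * raw bytes, once for the key and once for the value. */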
private long writeRecords(int count, boolean knownKeyLength,
boolean knownValueLength, boolean close) throws IOException {
long rawDataSize = 0;
for (int nx = 0; nx < count; nx++) {
String key = TestTFileByteArrays.composeSortedKey("key", nx);
DataOutputStream outKey =
writer.prepareAppendKey(knownKeyLength ? key.length() : -1);
outKey.write(key.getBytes());
outKey.close();
String value = "value" + nx;
DataOutputStream outValue =
writer.prepareAppendValue(knownValueLength ? value.length() : -1);
outValue.write(value.getBytes());
outValue.close();
rawDataSize +=
WritableUtils.getVIntSize(key.getBytes().length)
+ key.getBytes().length
+ WritableUtils.getVIntSize(value.getBytes().length)
+ value.getBytes().length;
}
if (close) {
closeOutput();
}
return rawDataSize;
}
private long writeRecords(int count, boolean knownKeyLength,
boolean knownValueLength) throws IOException {
return writeRecords(count, knownKeyLength, knownValueLength, true);
}
private void closeOutput() throws IOException {
if (writer != null) {
writer.close();
writer = null;
}
if (out != null) {
out.close();
out = null;
}
}
}
| 11,495 | 26.177305 | 85 |
java
|